예제 #1
0
    def test_vectors(self):
        """add_vector should record the vector as a bbox/probability/label dict."""
        vector = Vector((1, 2, 1, 2), 1, 1)
        self._event.add_vector(vector, ('test', 0.55))

        # NOTE(review): bbox (2, 4, 1, 2) differs from the raw input
        # (1, 2, 1, 2) — presumably add_vector applies some scaling; the
        # expected value here pins that behavior.
        expected = [{
            'bbox': (2, 4, 1, 2),
            'probability': 0.55,
            'label': 'test',
        }]
        self.assertEqual(self._event.vectors, expected, 'Vectors are invalid')
예제 #2
0
    def next_optimized_waypoint(self, cur_pos, cur_rot, pos_path,
                                cur_pos_index):
        """
        Returns the furthest point from path without an obstacle. Stops at the first position where the laser of
        nearest angle (laser angle ~= aimed position angle) detects an obstacle (laser distance < aimed position
        distance).
        :param cur_pos: current position of the robot
        :type cur_pos: Vector
        :param cur_rot: current orientation of the robot
        :type cur_rot: Quaternion
        :param pos_path: loaded path
        :type pos_path: list
        :param cur_pos_index: index of the current position in the path
        :type cur_pos_index: int
        :return: index of the furthest obstacle-free position on the path,
            or ``i - 1`` for the first position ``i`` blocked by an obstacle
        :rtype: int
        """
        lasers_angles = self.get_laser_scan_angles()
        lasers = self.get_laser_scan()['Echoes']

        max_lookahead_index = min(cur_pos_index + self.__max_lookahead - 1,
                                  len(pos_path) - 1)
        # Go through every position on the path starting at the position next to the current one and stopping at
        # max_lookahead_index positions further
        # NOTE(review): range() excludes max_lookahead_index itself, so the
        # returned index is never obstacle-checked — confirm this is intended.
        for i in range(cur_pos_index + 1, max_lookahead_index):
            tar_pos = pos_path[i]
            # convert potential aimed position to RCS (robot coordinate system)
            rcs_tar_pos = pure_pursuit.convert_to_rcs(tar_pos, cur_pos,
                                                      cur_rot)

            # current robot position in RCS
            rcs_origin = Vector(0, 0, 0)
            # compute angle between current robot position and aimed position
            tar_angle = rcs_origin.get_angle(rcs_tar_pos)

            # search for the nearest angle in lasers_angles.
            # Bug fix: the original compared the *signed* difference
            # (tar_angle - lasers_angles[j]), which always selected the laser
            # with the largest angle instead of the one closest to tar_angle;
            # the nearest angle requires the absolute difference.
            min_ind = min(range(len(lasers_angles)),
                          key=lambda j: abs(tar_angle - lasers_angles[j]))

            # if the laser hits an obstacle, return the index of the previous position on the path
            if lasers[min_ind] < cur_pos.distance_to(tar_pos):
                return i - 1
        return max_lookahead_index
예제 #3
0
 def orientationPath(self, dict=False):
     """
     Parses the orientations in the loaded file into a list of either
     Quaternion instances or dictionaries, depending on the value of the
     dict parameter.
     :param dict: if set to True, will parse into dictionaries instead of
     into Quaternion instances
     :type dict: bool
     """
     # Pull the orientation sub-dicts out of each pose once, up front.
     orientations = [p['Pose']['Orientation'] for p in self.path]
     if dict:
         # Plain dicts: copy the four components, keeping W, X, Y, Z order.
         return [{axis: o[axis] for axis in ('W', 'X', 'Y', 'Z')}
                 for o in orientations]
     # Quaternion form: scalar part W plus a Vector of the X/Y/Z components.
     return [
         Quaternion(o['W'], Vector(o['X'], o['Y'], o['Z']))
         for o in orientations
     ]
예제 #4
0
 def apply_one_vector(u):
     """Apply the wrapped operator to a single vector *u* and return the result."""
     # Zero-initialised result sized to the operator's range dimension.
     result = Vector(self.range.dim, 0)
     # op.apply writes into `result` in place via the implementation objects.
     self.op.apply(u._impl, result)
     return result
예제 #5
0
 def zero_vector(self):
     """Return a wrapped zero vector of this space's dimension."""
     zero = Vector(self.dim, 0)
     return WrappedVector(zero)
예제 #6
0
 def _copy_data(self):
     """Replace the internal implementation with a fresh Vector copy of itself."""
     # Re-wrap so this instance holds its own Vector — presumably so later
     # mutations do not affect a shared original (copy-on-write style).
     copied = Vector(self._impl)
     self._impl = copied
예제 #7
0
 def setUp(self):
     """Create the Vector fixture shared by the tests."""
     # bbox appears to be (xmin, xmax, ymin, ymax) with unit x/y scale
     # factors, matching the Vector usage elsewhere in this file.
     bbox = (1, 2, 1, 2)
     self._vector = Vector(bbox, 1, 1)
예제 #8
0
def greengrass_infinite_infer_run():
    """
    Run the DeepLens SSD inference loop forever.

    Loads the synset labels and the MXNet SSD model, then for every camera
    frame: runs inference, records each detection above the confidence
    threshold on an InferEvent, draws the bounding box and label on the
    frame, publishes the event to the IoT topic, and re-encodes the frame
    into the module-level ``jpeg`` buffer for the results thread.

    Any exception is published to the IoT topic (top-level lambda boundary —
    the broad catch is deliberate so failures are visible remotely).
    """
    try:
        # TODO: Parameterize model name
        model_name = 'deploy_ssd_resnet50_300'
        model_path = "/opt/awscam/artifacts/mxnet_{}_FP16_FUSED.xml".format(
            model_name)
        model_type = "ssd"

        # Trained image shape
        input_width = 300
        input_height = 300

        # Minimum confidence for a detection to be recorded and drawn
        # (was an inline magic number in the detection loop).
        prob_threshold = 0.5

        # synset line numbers start at 1, hence enumerate(_f, 1).
        # NOTE(review): lines keep their trailing newline — confirm callers
        # expect that in the label text.
        with open('docs/synset.txt', 'r') as _f:
            labels = dict(enumerate(_f, 1))

        client.publish(topic=iotTopic, payload="Labels loaded")

        # Start thread to write frames to disk
        results_thread = FIFO_Thread()
        results_thread.start()

        # MxNet Model
        model = awscam.Model(model_path, {"GPU": 1})

        client.publish(topic=iotTopic, payload="Model loaded")

        while True:
            ret, frame = awscam.getLastFrame()
            # fix: idiomatic truthiness test instead of `ret == False`
            if not ret:
                raise Exception("Failed to get frame from the stream")

            inferred_obj = InferEvent(frame, model_name, model_type)

            frameResize = cv2.resize(frame, (input_width, input_height))

            # Run inference
            output = model.doInference(frameResize)
            parsed_results = model.parseResult(model_type, output)['ssd']

            # Parse and document vectors
            for obj in parsed_results:
                if obj['prob'] > prob_threshold:
                    vector = Vector((
                        obj['xmin'],
                        obj['xmax'],
                        obj['ymin'],
                        obj['ymax'],
                    ), inferred_obj.xscale, inferred_obj.yscale)
                    inferred_obj.add_vector(vector, (
                        labels[obj['label']],
                        obj['prob'],
                    ))

                    cv2.rectangle(frame, (vector.xmin, vector.ymin),
                                  (vector.xmax, vector.ymax), (255, 165, 20),
                                  4)
                    cv2.putText(
                        frame, '{}: {:.2f}'.format(labels[obj['label']],
                                                   obj['prob']),
                        (vector.xmin, vector.ymin - 15),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 165, 20), 4)

            # Trigger event pipeline
            client.publish(topic=iotTopic, payload=str(inferred_obj))

            # Update output buffer consumed by the results thread
            global jpeg
            ret, jpeg = cv2.imencode('.jpg', frame)
    except Exception as e:
        client.publish(topic=iotTopic, payload=str(e))