Example #1
import time

import gobject
import pygst
pygst.require('0.10')
import gst
import numpy as np
import cv2

# ImageProcessing, StateEstimationAltitude, AutoPilot and PositionController
# are project-specific classes; their imports are omitted in the source.
class Main:
    def __init__(self, **kwargs):
        self.mainloop = gobject.MainLoop()
        self.pipeline = gst.Pipeline("pipeline")
        self.verbose = kwargs.get('verbose', True)
        self.debug = kwargs.get('debug', False)
        cam_width = kwargs.get('cam_width', 640)
        cam_height = kwargs.get('cam_height', 480)
        host = kwargs.get('host', '127.0.0.1')
        port = kwargs.get('port', 5000)
        h264 = kwargs.get('h264', False)
        self.marker_spotted = False
        self.image_processing = ImageProcessing(area_threshold=10)
        self.state_estimate = StateEstimationAltitude()
        self.autopilot = AutoPilot(self.state_estimate)
        self.position_controller = PositionController(self.autopilot,
                                                      self.state_estimate)
        if h264:
            self.videosrc = gst.parse_launch(
                'uvch264_src device=/dev/video0 name=src auto-start=true src.vfsrc'
            )
        else:
            self.videosrc = gst.element_factory_make('v4l2src', 'v4l2src')
        self.vfilter = gst.element_factory_make("capsfilter", "vfilter")
        self.vfilter.set_property(
            'caps',
            gst.caps_from_string(
                'image/jpeg, width=%s, height=%s, framerate=20/1' %
                (cam_width, cam_height)))
        self.queue = gst.element_factory_make("queue", "queue")

        self.udpsink = gst.element_factory_make('udpsink', 'udpsink')
        self.rtpjpegpay = gst.element_factory_make('rtpjpegpay', 'rtpjpegpay')
        self.udpsink.set_property('host', host)
        self.udpsink.set_property('port', port)

        self.pipeline.add_many(self.videosrc, self.queue, self.vfilter,
                               self.rtpjpegpay, self.udpsink)
        gst.element_link_many(self.videosrc, self.queue, self.vfilter,
                              self.rtpjpegpay, self.udpsink)
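        # Resulting chain: videosrc -> queue -> capsfilter (MJPEG caps)
        #                  -> rtpjpegpay -> udpsink, i.e. MJPEG over RTP/UDP.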

        pad = next(self.queue.sink_pads())
        # Send each frame to onVideoBuffer, where OpenCV can process it.
        pad.add_buffer_probe(self.onVideoBuffer)
        self.pipeline.set_state(gst.STATE_PLAYING)
        self.i = 0
        gobject.threads_init()
        context = self.mainloop.get_context()
        #previous_update = time.time()
        fpstime = time.time()
        while True:
            try:
                context.iteration(False)
                self.autopilot._read_sensors()
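                # auto_switch is presumably an RC-channel pulse width in
                # microseconds; above ~1700 the pilot has engaged auto mode.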
                if self.autopilot.auto_switch > 1700:
                    self.position_controller.headingHold()
                    self.position_controller.holdAltitude()
                    self.autopilot.send_control_commands()
                else:
                    self.position_controller.reset_targets()
                    print self.autopilot.print_commands()
            except KeyboardInterrupt:
                fps = self.i / (time.time() - fpstime)
                print 'fps %f ' % fps
                self.autopilot.dump_log()
                self.autopilot.disconnect_from_drone()
                break

    def onVideoBuffer(self, pad, idata):
        try:
            image = np.asarray(
                bytearray(idata),
                dtype=np.uint8,
            )
            frame = cv2.imdecode(image, cv2.CV_LOAD_IMAGE_UNCHANGED)
            self.i += 1
            marker = self.image_processing.recognize_marker(frame)
            self.autopilot.update_marker(marker)
            return True
        except Exception:
            # Swallow decode/processing errors so a bad frame never stalls
            # the pipeline; the buffer is passed through either way.
            return True
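A minimal usage sketch (the constructor arguments are the kwargs read above;
the receiver caps are the standard rtpjpegpay defaults, an assumption rather
than something taken from the source):

if __name__ == '__main__':
    Main(cam_width=640, cam_height=480, host='127.0.0.1', port=5000)

# On the receiving host, a GStreamer 0.10 pipeline along these lines can
# display the RTP/JPEG stream:
#   gst-launch-0.10 udpsrc port=5000 \
#       caps="application/x-rtp, media=video, encoding-name=JPEG, payload=26" \
#       ! rtpjpegdepay ! jpegdec ! autovideosink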
Example #2
import pickle

import cv2
import cv2.cv as cv
import pylab as pl

# ImageProcessing is the same project-specific class used in Example #1.
class OfflineVideo():

    def __init__(self, video_file="data/test_5/drone_eye.avi"):
        self.cap = cv2.VideoCapture(video_file)
        self.cap.set(cv.CV_CAP_PROP_FPS, 30)
        self.image_processing = ImageProcessing(area_threshold=300)
        self.writer = cv2.VideoWriter(filename="kalman_tracking5.avi",
                                      fourcc=cv.CV_FOURCC('M', 'J', 'P', 'G'),
                                      fps=30, frameSize=(320, 240))
        self.cam_altitude = []
        self.observations = []
        # self.autopilot = AutoPilot(simulate=True, thrust_step=30, pixel_threshold=10, cam_width=320, cam_height=240)
        # self.ras_MIN = np.array([150, 80, 80], np.uint8)
        # self.ras_MAX = np.array([175, 255, 255], np.uint8)

    def run(self):
        i = 0
        try:
            while True:
                flag, frame = self.cap.read()
                if not flag:
                    # End of the video file.
                    break
                # Only process every third frame.
                if i % 3 == 0:
                    marker = self.image_processing.recognize_marker(frame)
                    if marker:
                        self.cam_altitude.append(marker.z)
                        self.observations.append(marker)
                        cv2.circle(frame, (marker.x, marker.y), 5, 255, -1)
                        # print marker.rect
                        # print rect[]
                        x, y, w, h = cv2.boundingRect(marker.best_cnt)
                        cv2.rectangle(
                            frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                        cv2.putText(frame, 'altitude %s ' % marker.z,
                                    (20, 30), cv2.FONT_HERSHEY_PLAIN, 2,
                                    (0, 255, 0))
                        cv2.putText(frame, 'X: %s Y: %s ' %
                                    (str(marker.x), str(marker.y)),
                                    (20, 50),
                                    cv2.FONT_HERSHEY_PLAIN,
                                    2,
                                    (0, 255, 0))
                    else:
                        self.cam_altitude.append(None)
                        self.observations.append(None)
                i += 1
                #time.sleep(0.3)
                self.writer.write(frame)
                cv2.imshow('drone eye', frame)
                cv2.waitKey(5)
        except KeyboardInterrupt:
            pass
        # Dump the raw observations and draw the position estimates.
        pickle.dump(self.cam_altitude, open('cam_alt.dump', 'wb'))
        pickle.dump(
            self.observations,
            open('marker_observations5.dump', 'wb'))
        # pl.figure(size=(320,240))
        x = [o.x for o in self.observations if o]
        y = [o.y for o in self.observations if o]
        # obs_scatter = pl.scatter(x, y, marker='x', color='b',
        #                          label='observations')

        position_line = pl.plot(x, y,
                                linestyle='-', marker='o', color='r',
                                label='position est.')
        # lines_true = pl.plot(self.cam_altitude, color='b')
        # observations = pl.plot(x, color='b')
        # lines_filt = pl.plot(filtered_state_means, color='r')
        # pl.legend((lines_true[0]), ('Camera altitude'))
        pl.show()
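
A minimal usage sketch (the file names are the defaults shown in the class
above):

if __name__ == '__main__':
    # Replays the recorded flight, writes the annotated video, pickles the
    # marker observations, then shows the position plot.
    OfflineVideo(video_file="data/test_5/drone_eye.avi").run()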