Example #1
0
class App(QApplication):
    """Qt application that glues the video pipeline to the control form.

    Owns the preview widget, the background ``VideoThread`` and the UI
    form; routes key presses, mask toggling and threshold changes to the
    thread, and persists snapshot measurements (image + CSV row) to disk.
    """

    def __init__(self, *args, **kwargs):
        # Pop 'conf' before delegating: QApplication rejects unknown kwargs.
        self.conf = kwargs.pop('conf')
        super(App, self).__init__(*args, **kwargs)
        self.video_widget = None
        self.video_thread = None
        self.meta_img = None
        self.form = None

    def set_video_widget(self, video_widget, conf=None):
        """Attach the preview widget and create its worker thread.

        ``conf`` previously defaulted to a shared mutable ``{}``; the
        ``None`` sentinel avoids that pitfall while keeping the call
        signature backward-compatible.
        """
        if conf is None:
            conf = {}
        self.video_widget = video_widget
        self.video_thread = VideoThread(video_widget, conf)
        if self.form:
            # The form was wired first: sync its threshold to the thread.
            self.video_thread.set_separation_threshold(
                self.form.threshold.value())

    def set_form(self, form):
        """Attach the control form and connect its signals to handlers."""
        self.form = form
        self.form.key_press.connect(self.on_key_press)
        self.form.key_release.connect(self.on_key_release)
        self.form.destroyed.connect(self.on_exit)
        self.form.show_mask.stateChanged.connect(self.on_show_mask)
        self.form.threshold.valueChanged.connect(self.on_thresh_change)
        if self.video_thread:
            # The thread was created first: sync the form's threshold now.
            self.video_thread.set_separation_threshold(
                self.form.threshold.value())

    def on_key_press(self, key):
        """Alt shows the detail preview; Ctrl takes a snapshot."""
        if key == Qt.Key_Alt:
            self.on_detail_prev_show()
        elif key == Qt.Key_Control:
            self.on_snap()

    def on_key_release(self, key):
        """Releasing Alt hides the detail preview again."""
        if key == Qt.Key_Alt:
            self.on_detail_prev_hide()

    def on_exit(self):
        """Stop the worker thread when the form is destroyed."""
        self.video_thread.kill()

    def on_show_mask(self, state):
        """Toggle the mask overlay from the checkbox state."""
        if state == Qt.Checked:
            self.video_thread.show_mask()
        else:
            self.video_thread.hide_mask()

    def on_thresh_change(self, value):
        """Forward a new separation threshold to the video thread."""
        self.video_thread.set_separation_threshold(value)

    def on_detail_prev_show(self):
        self.video_thread.show_detail()

    def on_detail_prev_hide(self):
        self.video_thread.hide_detail()

    def on_snap(self):
        """Grab a frame, measure the object and offer to save the result."""
        frame, downscaled = self.video_thread.snap()
        sizes = self._measure(downscaled)
        self._show_save_dialog(frame, downscaled, sizes)

    def _measure(self, img):
        """Return the detected object's (width, height) in millimetres.

        Runs the red-dot calibration filter (establishes mm-per-pixel) and
        the separation filter (fits an ellipse), then scales the ellipse
        axes from pixels to millimetres.
        """
        meta_img = MetaImg(img, {})
        calib_filter = CalibrateRedDotFilter(
            {'dot_diameter_in_mm': CONFIG.get('dot_diameter_in_mm', 1)})
        sep_filter = SeparateObjectFilter()
        calib_filter(meta_img)
        sep_filter(meta_img)
        mm_on_px = meta_img.meta['mm_on_px']
        w = meta_img.meta['ellipsis'][1][0] * mm_on_px
        h = meta_img.meta['ellipsis'][1][1] * mm_on_px
        return w, h

    def _show_save_dialog(self, frame, downscaled, sizes):
        """Show the save dialog; persist the frame only if accepted."""
        self.save_dialog = QDialog()
        self.save_dialog.ui = SaveDialog(downscaled, sizes)
        self.save_dialog.ui.setupUi(self.save_dialog)
        self.save_dialog.show()
        self.save_dialog.accepted.connect(
            lambda: self._save_image_data(frame, sizes))

    def _save_image_data(self, frame, sizes):
        """Write the frame as '<no>_<label>.png' and append a CSV row."""
        no = self._parse_last_measure_no() + 1
        label = str(self.form.class_name.toPlainText())
        # Strip everything but word characters; fall back to a placeholder.
        label = re.sub(r'[^\w]', '', label) or 'no_label'
        img_name = '%d_%s.png' % (no, label)
        path = os.path.join(self.conf.get('save_folder', './'), img_name)
        cv2.imwrite(path, frame)

        self._save_measure(no, label, sizes, img_name)

    def _parse_last_measure_no(self):
        """Return the measure number from the last CSV line, or 0.

        Fixes two defects of the original: the handle is now closed even on
        error (context manager), and an *empty* measure file no longer
        raises NameError for the unbound loop variable.
        """
        path = self.conf.get('save_folder', './')
        last_line = None
        try:
            with open(os.path.join(path, self.conf['measure_file']), 'r') as f:
                for last_line in f:
                    pass
        except IOError:
            # Missing file simply means no measurements taken yet.
            return 0

        if last_line:
            return int(last_line.split(',')[0])
        return 0

    def _save_measure(self, no, label, sizes, img_name):
        """Append one 'no,label,width,height,img_name' row to the CSV."""
        path = self.conf.get('save_folder', './')
        with open(os.path.join(path, self.conf['measure_file']), 'a+') as f:
            f.write("%d,%s,%.3f,%.3f,%s\n"
                    % (no, label, sizes[0], sizes[1], img_name))

    def start(self):
        """Start the video thread; widget and form must be wired first."""
        assert self.video_widget
        assert self.form
        self.video_thread.start()
Example #2
0
class App(QApplication):
    """Application object wiring the video worker thread to the UI.

    Holds the video widget, the ``VideoThread`` and the form; dispatches
    keyboard shortcuts, mask preview and threshold updates to the thread,
    and saves snapshot measurements to an image file plus a CSV log.
    """

    def __init__(self, *args, **kwargs):
        # 'conf' must be removed before QApplication.__init__ sees kwargs.
        self.conf = kwargs.pop('conf')
        super(App, self).__init__(*args, **kwargs)
        self.video_widget = None
        self.video_thread = None
        self.meta_img = None
        self.form = None

    def set_video_widget(self, video_widget, conf=None):
        """Register the preview widget and spawn its VideoThread.

        A ``None`` sentinel replaces the original mutable ``{}`` default,
        which was shared across calls; behavior is otherwise unchanged.
        """
        if conf is None:
            conf = {}
        self.video_widget = video_widget
        self.video_thread = VideoThread(video_widget, conf)
        if self.form:
            # Form already attached: push its threshold to the new thread.
            self.video_thread.set_separation_threshold(
                self.form.threshold.value())

    def set_form(self, form):
        """Register the form and hook up all of its signals."""
        self.form = form
        self.form.key_press.connect(self.on_key_press)
        self.form.key_release.connect(self.on_key_release)
        self.form.destroyed.connect(self.on_exit)
        self.form.show_mask.stateChanged.connect(self.on_show_mask)
        self.form.threshold.valueChanged.connect(self.on_thresh_change)
        if self.video_thread:
            # Thread already running: apply the form's current threshold.
            self.video_thread.set_separation_threshold(
                self.form.threshold.value())

    def on_key_press(self, key):
        """Alt → show detail preview; Ctrl → snapshot."""
        if key == Qt.Key_Alt:
            self.on_detail_prev_show()
        elif key == Qt.Key_Control:
            self.on_snap()

    def on_key_release(self, key):
        """Alt released → hide the detail preview."""
        if key == Qt.Key_Alt:
            self.on_detail_prev_hide()

    def on_exit(self):
        """Kill the worker thread when the form goes away."""
        self.video_thread.kill()

    def on_show_mask(self, state):
        """Show or hide the mask overlay per the checkbox state."""
        if state == Qt.Checked:
            self.video_thread.show_mask()
        else:
            self.video_thread.hide_mask()

    def on_thresh_change(self, value):
        """Propagate the new separation threshold to the thread."""
        self.video_thread.set_separation_threshold(value)

    def on_detail_prev_show(self):
        self.video_thread.show_detail()

    def on_detail_prev_hide(self):
        self.video_thread.hide_detail()

    def on_snap(self):
        """Capture a frame, measure it, then open the save dialog."""
        frame, downscaled = self.video_thread.snap()
        sizes = self._measure(downscaled)
        self._show_save_dialog(frame, downscaled, sizes)

    def _measure(self, img):
        """Measure the object in *img*; return (width, height) in mm.

        Calibration (red dot of known diameter) yields the mm-per-pixel
        factor; separation fits an ellipse whose axes are then scaled.
        """
        meta_img = MetaImg(img, {})
        calib_filter = CalibrateRedDotFilter(
            {'dot_diameter_in_mm': CONFIG.get('dot_diameter_in_mm', 1)})
        sep_filter = SeparateObjectFilter()
        calib_filter(meta_img)
        sep_filter(meta_img)
        mm_on_px = meta_img.meta['mm_on_px']
        w = meta_img.meta['ellipsis'][1][0] * mm_on_px
        h = meta_img.meta['ellipsis'][1][1] * mm_on_px
        return w, h

    def _show_save_dialog(self, frame, downscaled, sizes):
        """Open the save dialog; saving happens only on acceptance."""
        self.save_dialog = QDialog()
        self.save_dialog.ui = SaveDialog(downscaled, sizes)
        self.save_dialog.ui.setupUi(self.save_dialog)
        self.save_dialog.show()
        self.save_dialog.accepted.connect(
            lambda: self._save_image_data(frame, sizes))

    def _save_image_data(self, frame, sizes):
        """Persist the frame as '<no>_<label>.png' plus a CSV entry."""
        no = self._parse_last_measure_no() + 1
        label = str(self.form.class_name.toPlainText())
        # Keep only word characters; empty labels get a placeholder.
        label = re.sub(r'[^\w]', '', label) or 'no_label'
        img_name = '%d_%s.png' % (no, label)
        path = os.path.join(self.conf.get('save_folder', './'), img_name)
        cv2.imwrite(path, frame)

        self._save_measure(no, label, sizes, img_name)

    def _parse_last_measure_no(self):
        """Return the running number from the CSV's last line, or 0.

        Unlike the original, the file is closed via a context manager and
        an empty (but existing) file no longer raises NameError because
        the loop variable is pre-initialized.
        """
        path = self.conf.get('save_folder', './')
        last_line = None
        try:
            with open(os.path.join(path, self.conf['measure_file']), 'r') as f:
                for last_line in f:
                    pass
        except IOError:
            # No measure file yet → numbering starts at 0.
            return 0

        if last_line:
            return int(last_line.split(',')[0])
        return 0

    def _save_measure(self, no, label, sizes, img_name):
        """Append 'no,label,width,height,img_name' to the measure file."""
        path = self.conf.get('save_folder', './')
        with open(os.path.join(path, self.conf['measure_file']), 'a+') as f:
            f.write("%d,%s,%.3f,%.3f,%s\n" %
                    (no, label, sizes[0], sizes[1], img_name))

    def start(self):
        """Launch the video thread; both collaborators must be set."""
        assert (self.video_widget)
        assert (self.form)
        self.video_thread.start()
Example #3
0
def main():
    """Run one autonomous delivery round end to end.

    Opens the Arduino serial link, starts the camera VideoThread, maps
    targets and the mothership onto the grid, then loops: drive to the
    closest target, try to pick it up, and deliver it to the mothership,
    returning home between attempts.  Finally joins the video thread.
    """
    # Initialize frame rate calculation
    # NOTE(review): frame_rate_calc, freq and font are never used in this
    # function — presumably leftovers from an FPS overlay; confirm.
    frame_rate_calc = 1
    freq = cv2.getTickFrequency()
    font = cv2.FONT_HERSHEY_SIMPLEX

    objectifier = Model()

    # Start serial connection to arduino
    ser = serial.Serial('/dev/ttyACM0', 9600, timeout=2)
    time.sleep(1)

    # Initialize queues
    # LIFO so consumers always see the freshest camera frame first.
    pic_q = queue.LifoQueue(5)
    command_q = queue.Queue()
    # Initialize grid and grid movement
    grid = Grid(8,8)
    movement = GridMovement(grid, ser)
    # Initialize VideoThread
    vt = VideoThread(pic_q, objectifier)
    vt.start()
    
    # Setup GPIO
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(BUTTONPIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(CONTACT_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    GPIO.setup(LEDPIN, GPIO.OUT, initial=GPIO.LOW)
    
    # Keep track of movements after approach is called
    # NOTE(review): approach_movement_list is never used below — confirm.
    approach_movement_list = queue.LifoQueue()

    wait_for_button(GPIO)
    time.sleep(2)
    
    #
    # RUN ROUND
    #

    print("Starting round")

    begin_round(movement, pic_q)

    # map the targets from json file
    # NOTE(review): 'mar1' is not defined anywhere visible — this looks
    # like it should be the filename string 'mar1.json'; confirm before use.
    map_JSON(mar1.json,movement)
    # now set the maximum amount of obstacles based on amount of targets 
    grid.set_obstacles_max()
    
    # NOTE(review): begin_round is invoked a second time here — duplicate
    # of the call above; confirm whether both calls are intentional.
    begin_round(movement, pic_q)

    print("I will try and map the mothership")
    map_mothership(movement, pic_q)
    print("Mothership is located in the following tiles: ", grid.mothership)

    # We can save these values in movement.access_point so other functions with access to movement
    # can use these
    mothership_angle, dist, side_angle = approach_mothership_side(movement, pic_q, ser, GPIO)
    movement.set_mothership_angle(mothership_angle)
    movement.set_side_angle(side_angle)
    movement.set_access_dist(dist)

    print("Mothership angle: {}, Distance: {}, Side_angle: {}".format(mothership_angle, dist, side_angle))

    print("Going home")
    go_home(movement, pic_q)

    # Deliver every mapped target, closest first.
    while grid.targets:
        # find closest target and set it as the goal
        movement.current_target = closest_point(grid.targets, movement.current)
        movement.set_goal(movement.current_target)
        follow_path(movement, pic_q)

        # Once we reach the target we attempt to pick it up
        approach(movement, pic_q)
        success, target_id = check_pick_up(movement, pic_q)
        print("Success: {}, Target Id: {}".format(success, target_id))

        if not success:
            go_home(movement, pic_q)
            
        else:
            grid.targets.remove(movement.current_target)
            # We would correct alignment here if errors were fixed
        
            # Move to mothership and drop target
            print("Access point is: ",movement.get_access_point())
            movement.set_goal(movement.get_access_point())
            follow_path(movement, pic_q, True)
            movement.face(movement.get_side_point())
            print("Going to drop it")
            # NOTE(review): passes the 'serial' *module*, not the open
            # 'ser' port created above — looks like a bug; confirm which
            # argument mothership_drop expects.
            mothership_drop(dist, mothership_angle, side_angle, target_id, movement, serial, pic_q)
        
        go_home(movement, pic_q)

    # Once all targets are delivered, go home and turn on finishing light
    go_home(movement, pic_q)
    wait_for_button(GPIO)
    
    # close video thread
    vt.join()