示例#1
0
    def __init__(self, sid, log, img_width=None, img_height=None):
        """
        Connect to the Arduino, configure servo and spray pins, load spray
        timing settings, and move both servos to their rest positions.

        Args:
            sid: Session identifier; used to build the per-session
                movement lookup key (f'movement--{sid}').
            log: Logger instance used for status messages.
            img_width: Optional image width in pixels (stored only; not
                used in this constructor).
            img_height: Optional image height in pixels (stored only).
        """

        # Setup log
        self.log = log

        # Setup general parameters
        self.movement_key = f'movement--{sid}'
        self.img_width = img_width
        self.img_height = img_height

        # Arduino setup (Firmata protocol over the serial port)
        log.info("Connecting to Arduino...")
        self.a = pyfirmata.Arduino('/dev/ttyS0')

        # Start iterator thread so serial buffer doesn't overflow
        iter8 = pyfirmata.util.Iterator(self.a)
        iter8.start()

        # Assign pins: digital 9/10 in servo mode, digital 5 as plain output
        self.servo_x = self.a.get_pin('d:9:s')
        self.servo_y = self.a.get_pin('d:10:s')
        self.spray_pin = self.a.get_pin('d:5:o')

        # Reference for servo control: maximum sweep per axis, with the
        # rest position at the midpoint of each range
        # (presumably degrees, matching SPRAY_ANGLE_RATE below — confirm)
        self.servo_x_max = 90
        self.servo_y_max = 60
        self.servo_x_rest = self.servo_x_max / 2
        self.servo_y_rest = self.servo_y_max / 2

        log.info("Connected to Arduino and pins configured")

        # Alternative RPi spray pin setup — drives the sprayer directly
        # from the Pi's GPIO pin 23, starting in the inactive state
        self.pi_spray = DigitalOutputDevice(pin=23,
                                            active_high=True,
                                            initial_value=False)

        # Servo parameters
        self.spray_per_plant = float(
            util.get_setting('SPRAY_PER_PLANT'))  # seconds
        self.spray_total_time = float(
            util.get_setting('SPRAY_TOTAL_TIME'))  # seconds
        self.spray_angle_rate = float(
            util.get_setting('SPRAY_ANGLE_RATE'))  # degrees per second

        # Multiplier to get from distance to angle, may change this to tan(angle) = dist / height
        self.spray_dist2angle = float(util.get_setting('SPRAY_DIST2ANGLE'))

        # Set default servo positions — the short delay gives the Firmata
        # connection time to settle before the first write
        time.sleep(1)
        self.servo_x.write(self.servo_x_rest)
        self.servo_y.write(self.servo_y_rest)
示例#2
0
    def start_capture(self, spray_queue):
        """
        Start scheduled capture from the camera at a defined framerate.

        Runs until `spray_queue` is emptied. The first frame captured is
        stored (in colour) in `self.first_frame`; every subsequent frame is
        converted to greyscale and pushed onto `self.frame_buffer`.

        Args:
            spray_queue: Queue acting as a run flag — capture continues
                while it holds at least one item.
        """
        self.log.info('Starting frame capture.')

        start_time = time.time()
        # Target interval between captures, derived from the configured
        # tracking framerate
        frame_wait = 1 / int(util.get_setting('FRAMERATE_TRACK'))

        frame_count = 0

        while len(spray_queue.queue):
            # Capture frame
            # NOTE(review): assumes the PiCamera path of __init__ ran so
            # that self.raw_cap exists; in DEBUG_CAM mode self.cam is a
            # cv2.VideoCapture with no .capture() method — confirm.
            self.cam.capture(self.raw_cap, format='bgr')
            frame = self.raw_cap.array

            # Clear frame buffer for next frame
            self.raw_cap.truncate(0)

            # Check frame was correctly read
            # NOTE(review): DEBUG_CAM is tested for truthiness here, while
            # DEBUG_TRACK below is parsed from its string form; if settings
            # come back as strings, 'false' counts as truthy — confirm.
            if frame is None:
                if util.get_setting('DEBUG_CAM'):
                    # Reload the video stream
                    self.cam = cv2.VideoCapture(
                        str(Path(__file__).parent.absolute() / '0000.mkv'))

                    continue

                else:
                    raise Exception('Unable to capture a frame!')

            # If first capture, save to first_frame, otherwise add to frame buffer
            if self.first_frame is None:
                self.first_frame = frame
            else:
                self.frame_buffer.put(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))

            if str(util.get_setting('DEBUG_TRACK')).lower() in [
                    'true', 't', '1'
            ]:
                # Advance to the next unused filename so existing debug
                # frames are never overwritten
                while os.path.isfile(f'raw/{frame_count:04}.jpg'):
                    frame_count += 1

                self.write_image(frame, f'raw/{frame_count:04}.jpg')
                frame_count += 1

            # Wait until next frame should be captured — the modulo keeps
            # captures aligned to the schedule even if a cycle overran
            time.sleep(frame_wait - ((time.time() - start_time) % frame_wait))
示例#3
0
    def run(self, device):
        """Launch the project's activity on *device* via adb.

        Uses the activity named in the `sublimeandroid_default_activity`
        setting when present, otherwise the project's main activity.
        """
        # Path to the adb binary inside the configured SDK
        adb = os.path.join(project.get_sdk_dir(), "platform-tools", "adb")

        # Setting takes precedence; fall back to the manifest's main
        # activity only when the setting is empty
        activity = (get_setting("sublimeandroid_default_activity", "")
                    or project.get_activity_main())

        cmd = [adb, "-s", device, "shell", "am", "start", "-n", activity]
        self.window.run_command("android_exec",
                                {"cmd": cmd,
                                 "working_dir": project.get_path()})
示例#4
0
def get_path():
    """Gets android project path from one of the top level folders in sublime project.

    TODO there are instances where a project may contain subprojects and
    even where sublime may be used in a fashion to include multiple top-level
    folders to show multiple projects. It would be nice to support these cases.

    Returns:
        String pointing to absolute path of android project root, or None
        if no android project could be located.
    """
    p = get_setting("sublimeandroid_project_path", "")
    if p:
        log.debug("Returning project path from settings")
        return p

    view = sublime.active_window().active_view()

    # check if view has already been mapped to an android project
    if view is not None and _project_map.get(view.id(), None) is not None:
        log.debug("Returning cached response for view %s: %s", view.id(), _project_map[view.id()])
        return _project_map[view.id()]

    # Use active file to traverse upwards and locate project
    if view is not None and view.file_name():
        folder = os.path.dirname(view.file_name())
        while True:
            android_manifest = os.path.join(folder, "AndroidManifest.xml")
            project_properties = os.path.join(folder, "project.properties")
            if os.path.isfile(android_manifest) and os.path.isfile(project_properties):
                log.info("Found project from active file %s. %s", view.file_name(), folder)
                _project_map[view.id()] = folder
                return folder
            parent = os.path.abspath(os.path.join(folder, ".."))
            if parent == folder:
                # Reached the filesystem root. Comparing parent to folder
                # terminates on both "/" and Windows drive roots, where the
                # previous `folder != "/"` test would loop forever.
                break
            folder = parent

    # inspect project folders to locate root android project
    #
    # Using sublime project folders has less precedent than the current view since this will
    # simply return the first android project found in the list of folders. This is not
    # constrained by a `view is None` check as it is meant to run in case the current view
    # is outside of the sublime project.
    #
    # BUG this could be buggy if tests are included in project root but sublime allows you
    # to add a subfolder of a project folder as another project folder. (phew!)
    for folder in sublime.active_window().folders():
        a = os.path.join(folder, "local.properties")
        b = os.path.join(folder, "project.properties")
        if os.path.isfile(a) and os.path.isfile(b):
            log.info("Found project from sublime folder %s.", folder)
            if view is not None:
                _project_map[view.id()] = folder
            return folder

    log.info("Android project path not found.")
示例#5
0
    def run(self, callbacks, opts=None):
        """Prompt for a target adb device, auto-selecting when possible.

        Shows a quick panel listing attached devices; if exactly one device
        is attached (and the default-select setting is enabled) it is chosen
        automatically, and a status message is shown when none are attached.

        Args:
            callbacks: Callbacks stored for use once a device is chosen.
            opts: Optional dict of extra options; defaults to an empty dict.
        """
        self.callbacks = callbacks
        # `opts=None` replaces the mutable default `opts={}`: a shared dict
        # default would be reused (and could be mutated) across calls.
        self.opts = {} if opts is None else opts

        devices, options = get_devices()
        self.devices = devices

        if len(options) == 0:
            sublime.status_message("ADB: No device attached!")
        elif len(options) == 1 and get_setting("sublimeandroid_device_select_default", True):
            self.on_done(0)  # run default
        else:
            self.window.show_quick_panel(options, self.on_done)
示例#6
0
def get_sdk_dir():
    """Determine path of sdk dir.

    Check if setting exists to point to sdk dir, otherwise use
    local.properties of detected android project.

    Returns:
        String path to the android SDK directory.
    """
    sdk_dir = get_setting("sublimeandroid_sdk_dir", "")
    if sdk_dir:
        return sdk_dir
    p = get_path()
    # Context manager guarantees the file is closed even if read() raises
    # (the original open/read/close leaked the handle on error).
    with open(os.path.join(p, "local.properties")) as f:
        s = f.read()
    # Raw string: "\." in a plain string is an invalid escape sequence
    # (a DeprecationWarning, later SyntaxWarning, in modern Python).
    return re.search(r"^sdk\.dir=(.*)\n", s, re.MULTILINE).group(1)
示例#7
0
    def get_inference(self, img):
        """
        Send an image to the inference server and return the result.

        The image is JPEG-encoded and POSTed to the server's /api/detect
        endpoint; detections come back as JSON.

        @return
        bbox        dict with 'count' and 'bounding_boxes', where each box
                    has been converted to [x, y, w, h]; None on a non-200
                    response or on request timeout
        """
        url = inference_url + '/api/detect'

        # Prepare headers for http request
        headers = {'content-type': 'image/jpeg'}

        # Encode image as jpeg
        _, img_encoded = cv2.imencode('.jpg', img)

        # Send http request with image and receive response
        try:
            # .tobytes() replaces ndarray.tostring(), which was deprecated
            # in NumPy 1.19 and removed in NumPy 2.0.
            response = requests.post(
                url,
                data=img_encoded.tobytes(),
                headers=headers,
                timeout=int(util.get_setting('INFERENCE_TIMEOUT')) / 1000)

            if response.status_code == 200:
                # Convert bbox from [x1, y1, x2, y2] to [x, y, w, h]
                bbox = json.loads(response.text)

                for i in range(bbox['count']):
                    box = bbox['bounding_boxes'][i]

                    # Find width and height
                    w = box[2] - box[0]
                    h = box[3] - box[1]

                    # Apply back to original variable
                    bbox['bounding_boxes'][i][2] = w
                    bbox['bounding_boxes'][i][3] = h

                return bbox

            else:
                self.log.error(
                    f'Unexpected status code: {response.status_code}')
                return None

        except requests.exceptions.ReadTimeout as e:
            self.log.debug(e)

            return None
示例#8
0
    def __init__(self, sid, log):
        """
        Set up the camera (a real PiCamera, or a debug video file when
        DEBUG_CAM is set) and initialise the frame buffer.

        Args:
            sid: Session identifier; used to build the per-session
                movement lookup key (f'movement--{sid}').
            log: Logger instance used for status messages.
        """
        # Setup logs
        self.log = log

        # Redis configuration
        self.movement_key = f'movement--{sid}'

        # Setup camera
        # NOTE(review): DEBUG_CAM is tested for truthiness; if settings are
        # returned as strings, 'false' would count as truthy — confirm.
        if util.get_setting('DEBUG_CAM'):
            self.cam = cv2.VideoCapture(
                str(Path(__file__).parent.absolute() / '0000.mkv'))
        else:
            # self.cam = cv2.VideoCapture(0)
            self.cam = PiCamera()
            self.cam.resolution = (640, 480)
            self.raw_cap = PiRGBArray(self.cam, size=(640, 480))

        # NOTE(review): self.raw_cap is only created on the PiCamera path,
        # but start_capture() uses it unconditionally — confirm the debug
        # path is still exercised.
        self.active = False

        self.frame_buffer = Queue()
        self.clear_buffer()
示例#9
0
import re, os, sys, string, util

# Directory used for temporary files; falls back to "" when the setting is
# missing or empty. A single get_setting call replaces the original
# duplicated lookup (one call to test, a second to assign).
tmpfile_path = util.get_setting('tmpfile_path') or ""

def create_pattern_aac(sequence,klasse):
	pattern = []
	for i in range (1,2):
		input = []
		G_count= 0.0
		A_count= 0.0
		V_count= 0.0
		L_count= 0.0
		I_count= 0.0
		C_count= 0.0
		M_count= 0.0
		F_count= 0.0
		Y_count= 0.0
		W_count= 0.0
		P_count= 0.0
		S_count= 0.0
		T_count= 0.0
		N_count= 0.0
		Q_count= 0.0
		D_count= 0.0
		E_count= 0.0
		H_count= 0.0
		K_count= 0.0
		R_count= 0.0
		for k in range (0,len(sequence)):
示例#10
0
文件: spray.py 项目: XDGFX/SPRAYai
    def start_spraying(self):
        """
        Main logic for overall spray program.

        Starts the capture and tracking threads, then loops: runs inference
        on the latest first frame, converts detections to centre points,
        and sprays each point (bottom of frame first) while continuously
        correcting the servo position. Loops until `spray_queue` is
        emptied, then stops spraying and joins both worker threads.
        """

        # Check if already spraying
        if len(spray_queue.queue):
            self.log.info('Already spraying!')
            return

        # Clear the queue in a thread-safe manner
        # NOTE(review): the early return above means this point is only
        # reached when the queue is already empty, so this clear is
        # effectively a no-op — confirm intent.
        with spray_queue.mutex:
            spray_queue.queue.clear()

        # Put something in the queue to trigger spraying
        spray_queue.put(True)

        # Target interval between inference passes
        inference_wait = 1 / float(util.get_setting('FRAMERATE_INFERENCE'))

        # Enable frame capture
        t_cap = Thread(target=self.cam.start_capture, args=(spray_queue, ))
        t_cap.start()

        # Start tracking movement
        t_track = Thread(target=self.cam.start_track, args=(spray_queue, ))
        t_track.start()

        prev_point = (0, 0)

        start_time = time.time()

        # NOTE(review): DEBUG_TRACK is tested for truthiness here but
        # parsed from its string form further down; if the two tests ever
        # disagree (e.g. the value '0'), frame_count would be used below
        # without having been defined — confirm.
        if util.get_setting('DEBUG_TRACK'):
            import shutil

            # Create the debug output directories if they don't exist yet
            for path in ['original', 'corrected', 'motion', 'raw']:
                if not os.path.isdir(path):
                    # shutil.rmtree(path)
                    os.mkdir(path)

            frame_count = 0

        self.log.info('Spraying...')

        # Keep spraying while spraying is active
        while len(spray_queue.queue):

            # Clear previous buffer
            self.cam.clear_buffer()

            # Wait for the first frame to capture
            while self.cam.first_frame is None:
                time.sleep(0.01)

            # Perform inference
            bbox = self.cam.get_inference(self.cam.first_frame)

            # Check that the request was successful
            if bbox is None:
                continue

            if str(util.get_setting('DEBUG_TRACK')).lower() in [
                    'true', 't', '1'
            ]:
                # Save frames for debugging
                # Advance to the next unused filename so existing debug
                # frames are never overwritten
                while os.path.isfile(f'original/{frame_count:04}.jpg'):
                    frame_count += 1

                self.log.debug(f'Saving frame: {frame_count}')

                if bbox['count'] == 0:
                    self.cam.write_image(self.cam.first_frame,
                                         f'original/{frame_count:04}.jpg')
                else:
                    original_inference = self.cam.draw_bounding_boxes(
                        self.cam.first_frame, bbox)
                    bbox = self.servo.correct_bbox(bbox)
                    corrected_inference = self.cam.draw_bounding_boxes(
                        self.cam.frame_buffer.get(), bbox)

                    self.cam.write_image(original_inference,
                                         f'original/{frame_count:04}.jpg')
                    self.cam.write_image(corrected_inference,
                                         f'corrected/{frame_count:04}.jpg')

                frame_count += 1

            # Check if any detections were made
            if bbox['count'] == 0:
                self.log.debug(
                    'No detections found! Not bothering to continue this frame.'
                )

                # Wait until next frame should be captured
                time.sleep(inference_wait -
                           ((time.time() - start_time) % inference_wait))

                continue

            self.log.info(bbox)

            # Convert bounding boxes to centre points to spray
            original_points = [
                self.servo.bbox2centre(bbox['bounding_boxes'][i])
                for i in range(bbox['count'])
            ]

            # Order points starting at the bottom (largest y)
            ordered_points = sorted(original_points,
                                    key=lambda point: point[1],
                                    reverse=True)

            # Spray each point
            total_spray_start_time = time.time()
            for point in ordered_points:

                # Check if spraying has been disabled
                if not len(spray_queue.queue):
                    break

                # Check that spray time has not been exceeded
                if time.time(
                ) > total_spray_start_time + self.servo.spray_total_time:
                    self.log.warning(
                        'Ran out of time to spray all plants in this image')
                    self.log.warning(
                        f'Consider increasing SPRAY_TOTAL_TIME if possible (currently: {self.servo.spray_total_time}s)'
                    )
                    break

                # Do this twice to ensure initial spray is relatively accurate
                for i in range(2):
                    # Make initial correction
                    new_point = self.servo.correct_point(point)

                    # Move sprayer to position
                    self.servo.goto_point(new_point, prev_point)
                    prev_point = new_point

                # Start spraying
                self.servo.spray(enable=True)

                spray_start_time = time.time()

                # Keep tracking while spraying
                while time.time(
                ) < spray_start_time + self.servo.spray_per_plant:
                    new_point = self.servo.correct_point(point)
                    self.servo.goto_point(new_point, prev_point)
                    prev_point = new_point

                # Stop spraying
                self.servo.spray(enable=False)

        # Stop spraying
        self.servo.spray(enable=False)

        # Terminate video stream
        self.cam.active = False

        # Wait for camera thread to terminate
        t_cap.join()

        # Wait for track thread to terminate
        t_track.join()
示例#11
0
    def start_track(self, spray_queue):
        """
        Track movement between each frame in the frame buffer using Lucas-Kanade Optical Flow.

        Accumulated translation and rotation are stored under
        `self.movement_key` so other components can correct coordinates
        for camera movement.

        Args:
            spray_queue: Queue acting as a run flag — tracking continues
                while it holds at least one item.
        """
        # Consecutive frames that failed to track; reset on success
        self.track_err_count = 0

        frame_count = 0
        # Random colours for drawing up to 100 debug feature tracks
        colours = np.random.randint(0, 255, (100, 3))

        while len(spray_queue.queue):
            # Make sure queue doesn't get too long
            queue_length = len(self.frame_buffer.queue)
            if queue_length > int(util.get_setting('FRAMERATE_TRACK')):
                self.log.warning(
                    f'Length of frame queue is getting long ({queue_length})!. Check that the processor is not overwhelmed.'
                )

            # Make sure track error count is not too long
            if self.track_err_count >= 3:
                self.log.error(
                    f'Unable to track {self.track_err_count} frames in a row! There may be an issue with the camera'
                )

            # If frame_buffer was recently cleared, wait for re-initialisation
            while (self.first_frame is None) or (len(self.frame_buffer.queue)
                                                 == 0):
                time.sleep(0.01)

            # Setup frames
            # first_frame is stored in colour, so it needs converting;
            # frames taken from the buffer are already greyscale
            new_frame = self.frame_buffer.get()
            prev_frame = self.prev_frame if self.prev_frame is not None else cv2.cvtColor(
                self.first_frame, cv2.COLOR_BGR2GRAY)

            # Setup track points
            prev_pts = cv2.goodFeaturesToTrack(prev_frame,
                                               maxCorners=200,
                                               qualityLevel=0.01,
                                               minDistance=30,
                                               blockSize=3)

            # Calculate optical flow
            new_pts, status, err = cv2.calcOpticalFlowPyrLK(
                prev_frame, new_frame, prev_pts, None)

            # Sanity check
            assert prev_pts.shape == new_pts.shape

            # Filter only valid points
            idx = np.where(status == 1)[0]
            prev_pts = prev_pts[idx]
            new_pts = new_pts[idx]

            try:
                # Save frames for debugging
                if str(util.get_setting('DEBUG_TRACK')).lower() in [
                        'true', 't', '1'
                ]:
                    debug_frame = new_frame

                    mask = np.zeros_like(prev_frame)

                    for i, (new, old) in enumerate(zip(new_pts, prev_pts)):
                        # NOTE(review): ravel() yields floats; recent
                        # OpenCV releases require integer pixel coordinates
                        # for line()/circle() — confirm against the
                        # installed cv2 version.
                        a, b = new.ravel()
                        c, d = old.ravel()

                        mask = cv2.line(mask, (a, b), (c, d),
                                        colours[i].tolist(), 2)
                        debug_frame = cv2.circle(debug_frame, (a, b), 5,
                                                 colours[i].tolist(), -1)
                    debug_frame = cv2.add(debug_frame, mask)

                    # Advance to the next unused filename so existing debug
                    # frames are never overwritten
                    while os.path.isfile(f'motion/{frame_count:04}.jpg'):
                        frame_count += 1

                    self.write_image(debug_frame,
                                     f'motion/{frame_count:04}.jpg')

                    frame_count += 1

                    # NOTE(review): everything from here down — the motion
                    # estimate, the movement-key update, and the
                    # prev_frame / error-count bookkeeping — sits inside
                    # the DEBUG_TRACK branch, so tracking state only
                    # advances while debugging is enabled. That looks
                    # unintentional; confirm whether it should be dedented.
                    # Find transformation matrix
                    m = cv2.estimateAffinePartial2D(prev_pts, new_pts)

                    # Extract translation
                    dx = m[0][0][2]
                    dy = m[0][1][2]

                    # Extract rotation angle
                    da = np.arctan2(m[0][1][0], m[0][0][0])

                    # self.log.debug(
                    #     f'Movement: {dx:5.2f}:{dy:5.2f}:{da:5.2f}')

                    # `r` is presumably a module-level Redis client holding
                    # the accumulated (dx, dy, da) movement — confirm
                    old_movement = json.loads(r.get(self.movement_key))
                    new_pos = tuple(
                        map(lambda i, j: i + j, old_movement, (dx, dy, da)))

                    r.set(self.movement_key, json.dumps(new_pos))

                    self.track_err_count = 0
                    self.prev_frame = new_frame

            except Exception as e:
                # NOTE(review): the exception is passed as a positional
                # logging arg with no placeholder in the message, so stdlib
                # logging would drop (or choke on) the detail — confirm
                # which logger this is.
                self.log.debug('Failed to calculate transformation', e)
                self.track_err_count += 1