Example #1
def driving_position(data):
    '''
    Moves the robot to the driving position
    '''

    # Make the element we'll be using to run the demo
    run_element = Element("run_demo")

    # turn on control
    set_control(run_element, True)

    res = run_element.command_send(
        "robot_api",
        "trajectory_fastest", {
            "t": [["joint_0", [-1.29638671875]], ["joint_1", [140]],
                  ["joint_2", [-140]], ["joint_3", [0]], ["joint_4", [0]],
                  ["joint_5", [0]]],
            "v":
            VEL,
            "a":
            ACCEL
        },
        serialize=True)
    if (res['err_code'] != 0):
        return Response(err_code=1,
                        err_str="Failed to move to driving position",
                        serialize=True)

    return Response("Success", serialize=True)
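A minimal caller-side sketch for this handler, assuming it is registered as the "driving_position" command on the "demo" element (as in Example #17); the caller element name is hypothetical, and the empty payload reflects that the handler ignores its data argument.

from atom import Element

# "demo" and "driving_position" come from the registration in Example #17;
# the caller element name here is hypothetical.
caller = Element("demo_caller")
res = caller.command_send("demo", "driving_position", "", deserialize=True)
if res["err_code"] != 0:
    print("Failed to reach driving position: {}".format(res["err_str"]))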
Example #2
    def __init__(self,
                 mode="both",
                 input_size=512,
                 scaling_factor=2,
                 config_path="sd-maskrcnn/cfg/benchmark.yaml"):
        self.element = Element("instance-segmentation")
        self.input_size = input_size
        self.scaling_factor = scaling_factor
        self.config_path = config_path
        self.mode = mode
        # Streaming of masks is disabled by default to prevent consumption of resources
        self.stream_enabled = False

        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.visible_device_list = "0"
        set_session(tf.Session(config=config))

        self.set_mode(b"both")
        # Initiate tensorflow graph before running threads
        self.get_masks()
        self.element.command_add("segment", self.segment, 10000)
        self.element.command_add("get_mode", self.get_mode, 100)
        self.element.command_add("set_mode", self.set_mode, 10000)
        self.element.command_add("stream", self.set_stream, 100)
        t = Thread(target=self.element.command_loop, daemon=True)
        t.start()
        self.publish_segments()
Example #3
def run_demo(data):
    '''
    Runs the demo
    '''

    # Make the element we'll be using to run the demo
    run_element = Element("run_demo")

    # turn on control
    set_control(run_element, True)

    # Note that we haven't found the ball
    found = False

    # Run over the observation list
    for (x, y) in OBSERVATION_LIST:

        result = scan_height(run_element, x, y)
        if result is None:
            found = False
            continue

        found = True
        break

    # If we found the ball, go ahead and grab it
    if found:
        res = run_element.command_send(
            "ros",
            "move_to_pos", {
                "xyz":
                [result[0] + TRANSFORM_X, result[1] + TRANSFORM_Y, HEIGHT_Z],
                "rpy": [CAMERA_R, CAMERA_P, CAMERA_Y],
                "v": VEL,
                "a": ACCEL
            },
            serialize=True)
        if (res['err_code'] != 0):
            return Response(err_code=3,
                            err_str="Failed to move to grasp pos",
                            serialize=True)

    # Go home
    res = run_element.command_send("robot_api", "home", {}, serialize=True)
    if (res['err_code'] != 0):
        return Response(err_code=4,
                        err_str="Failed to go home",
                        serialize=True)

    # Based on whether or not we found the ball return an appropriate
    #   response
    if found:
        return Response("Success", serialize=True)
    else:
        return Response(err_code=5,
                        err_str="Failed to find the ball",
                        serialize=True)
Example #4
    def __init__(self, element_name, width, height, fps, retry_delay):
        self._width = width
        self._height = height
        self._fps = fps
        self._retry_delay = retry_delay

        self._status_is_running = False
        self._status_lock = Lock()
        
        # Init element
        self._element = Element(element_name)
        self._element.healthcheck_set(self.is_healthy)
        #self._element.command_add(command_name, command_func_ptr, timeout, serialize)
        
        # Run command loop
        thread = Thread(target=self._element.command_loop, daemon=True)
        thread.start()
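The command_add call in this constructor is left commented out. A small standalone sketch of what a registration could look like, following the pattern used elsewhere in these examples; the "ping" command, its handler, and the element name are all hypothetical.

from threading import Thread
from atom import Element
from atom.messages import Response

def ping(data):
    # Hypothetical handler: confirm the element responds to commands
    return Response("pong", serialize=True)

element = Element("camera_element")  # element name is illustrative
element.command_add("ping", ping, timeout=1000)
Thread(target=element.command_loop, daemon=True).start()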
Example #5
    def __init__(
        self,
        element_name,
        transform_file_path,
        calibration_client_path,
        depth_shape,
        color_shape,
        fps,
        disparity_shift,
        depth_units,
        rotation,
        retry_delay
    ):
        self._transform_file_path = transform_file_path
        self._calibration_client_path = calibration_client_path
        self._depth_shape = depth_shape
        self._color_shape = color_shape
        self._fps = fps
        self.disparity_shift = disparity_shift
        self.depth_units = depth_units
        self._rotation = rotation
        self._retry_delay = retry_delay

        self._status_is_running = False
        self._status_lock = Lock()
        self._pipeline = rs.pipeline()
        self._rs_pc = rs.pointcloud()
        self._transform = TransformStreamContract(x=0, y=0, z=0, qx=0, qy=0, qz=0, qw=1)
        self._transform_last_loaded = 0

        # Create an align object: rs.align allows us to perform alignment of depth frames to other frames
        self._align = rs.align(rs.stream.color)

        # Init element
        self._element = Element(element_name)
        self._element.healthcheck_set(self.is_healthy)
        self._element.command_add(
            CalculateTransformCommand.COMMAND_NAME,
            self.run_transform_estimator,
            timeout=2000,
            deserialize=CalculateTransformCommand.Request.SERIALIZE
        )

        # Run command loop
        thread = Thread(target=self._element.command_loop, daemon=True)
        thread.start()
Example #6
def home(data):
    '''
    Moves the robot home
    '''

    # Make the element we'll be using to run the demo
    run_element = Element("run_demo")

    res = run_element.command_send("robot_api",
                                   "home", {
                                       "v": VEL,
                                       "a": ACCEL
                                   },
                                   serialize=True)
    if (res['err_code'] != 0):
        return Response(err_code=1,
                        err_str="Failed to move to home position",
                        serialize=True)

    # turn off control
    set_control(run_element, False)

    return Response("Success", serialize=True)
Example #7
def main():
    parser = argparse.ArgumentParser(
        formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('--credentials',
                        type=existing_file,
                        metavar='OAUTH2_CREDENTIALS_FILE',
                        default=os.path.join(os.path.expanduser('~/.config'),
                                             'google-oauthlib-tool',
                                             'credentials.json'),
                        help='Path to store and read OAuth2 credentials')
    parser.add_argument('--device_model_id',
                        type=str,
                        metavar='DEVICE_MODEL_ID',
                        required=True,
                        help='The device model ID registered with Google')

    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='%(prog)s ' + Assistant.__version_str__())

    args = parser.parse_args()
    with open(args.credentials, 'r') as f:
        credentials = google.oauth2.credentials.Credentials(token=None,
                                                            **json.load(f))

    # Set up the sound thread
    sound_queue = Queue()
    sound_thread = Process(target=sound_playback_thread, args=(sound_queue, ))
    sound_thread.start()

    # Set up the process that will play sounds for other processes. This
    #   is needed until we get a shared PulseAudio system up and running
    #   s.t. all elements can play their own sounds
    sound_element = Process(target=sound_element_thread, args=(sound_queue, ))
    sound_element.start()

    with Assistant(credentials, args.device_model_id) as assistant:

        # Create our element
        element = Element("voice")

        # Start the assistant
        events = assistant.start()

        for event in events:
            process_event(event, assistant, element, sound_queue)
Example #8
def sound_element_thread(sound_queue):
    """
    Registers all of the sound playback commands for this element
    and handles when they're called
    """

    elem = Element("sound")

    # Register our callback to play sounds
    elem_class = SoundElement(sound_queue)
    elem.command_add("play_sound", elem_class.command_cb)
    elem.command_loop()
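A hedged sketch of how another element might invoke the command registered above; the caller element name and the payload are illustrative, since the format SoundElement.command_cb expects is not shown in this listing.

from atom import Element

caller = Element("voice_caller")  # hypothetical element name
# "sound" and "play_sound" come from the registration above; the payload is a guess.
res = caller.command_send("sound", "play_sound", "chime")
if res["err_code"] != 0:
    print(res["err_str"])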
Example #9
# atombot.py
from atom import Element
from atom.messages import Response
from threading import Thread
import pdb
import time

PUBLISH_FREQUENCY = 100
TIME_FOR_WAVEFORM = 5

if __name__ == "__main__":

    element = Element("voice_demo")

    # Wait for the record element to start up and launch the VNC.
    #   this can and should be fixed with a heartbeat!
    time.sleep(10)

    # Start the recording and wait for 5s
    data = {
        "name": "example",
        "t": TIME_FOR_WAVEFORM,
        "perm": False,
        "e": "waveform",
        "s": "serialized"
    }
    res = element.command_send("record", "start", data, serialize=True)
    time.sleep(TIME_FOR_WAVEFORM + 2)

    # Strings we'll recognize for the plotting commands. This is pretty
    #   rudimentary and can be improved with some better parsing/processing/NLP
Example #10
class Realsense:

    def __init__(
        self,
        element_name,
        transform_file_path,
        calibration_client_path,
        depth_shape,
        color_shape,
        fps,
        disparity_shift,
        depth_units,
        rotation,
        retry_delay
    ):
        self._transform_file_path = transform_file_path
        self._calibration_client_path = calibration_client_path
        self._depth_shape = depth_shape
        self._color_shape = color_shape
        self._fps = fps
        self.disparity_shift = disparity_shift
        self.depth_units = depth_units
        self._rotation = rotation
        self._retry_delay = retry_delay

        self._status_is_running = False
        self._status_lock = Lock()
        self._pipeline = rs.pipeline()
        self._rs_pc = rs.pointcloud()
        self._transform = TransformStreamContract(x=0, y=0, z=0, qx=0, qy=0, qz=0, qw=1)
        self._transform_last_loaded = 0

        # Create an align object: rs.align allows us to perform alignment of depth frames to other frames
        self._align = rs.align(rs.stream.color)

        # Init element
        self._element = Element(element_name)
        self._element.healthcheck_set(self.is_healthy)
        self._element.command_add(
            CalculateTransformCommand.COMMAND_NAME,
            self.run_transform_estimator,
            timeout=2000,
            deserialize=CalculateTransformCommand.Request.SERIALIZE
        )

        # Run command loop
        thread = Thread(target=self._element.command_loop, daemon=True)
        thread.start()

    def is_healthy(self):
        """
        Reports whether the realsense is connected and streaming or not
        """
        try:
            self._status_lock.acquire()
            if self._status_is_running:
                return Response(err_code=0, err_str="Realsense online")
            else:
                return Response(err_code=1, err_str="Waiting for realsense")
        finally:
            self._status_lock.release()

    def load_transform_from_file(self, fname):
        """
        Opens specified file, reads transform, and returns as list.

        Args:
            fname (str): CSV file that stores the transform
        """
        with open(fname, "r") as f:
            transform_list = [float(v) for v in f.readlines()[-1].split(",")]
            return TransformStreamContract(
                x=transform_list[0],
                y=transform_list[1],
                z=transform_list[2],
                qx=transform_list[3],
                qy=transform_list[4],
                qz=transform_list[5],
                qw=transform_list[6]
            )

    def run_transform_estimator(self, *args):
        """
        Runs the transform estimation procedure, which saves the transform to disk.
        """
        process = subprocess.Popen(self._calibration_client_path, stderr=subprocess.PIPE)
        out, err = process.communicate()
        return Response(
            data=CalculateTransformCommand.Response().to_data(),
            err_code=process.returncode,
            err_str=err.decode(),
            serialize=CalculateTransformCommand.Response.SERIALIZE
        )

    def run_camera_stream(self):
        while True:
            try:
                # Try to establish realsense connection
                self._element.log(LogLevel.INFO, "Attempting to connect to Realsense")

                # Set disparity shift
                device = rs.context().query_devices()[0]
                advnc_mode = rs.rs400_advanced_mode(device)
                depth_table_control_group = advnc_mode.get_depth_table()
                depth_table_control_group.disparityShift = self.disparity_shift
                advnc_mode.set_depth_table(depth_table_control_group)

                # Attempt to stream accel and gyro data, which requires d435i
                # If we can't then we revert to only streaming depth and color
                try:
                    config = rs.config()
                    config.enable_stream(
                        rs.stream.depth, self._depth_shape[0], self._depth_shape[1], rs.format.z16, self._fps
                    )
                    config.enable_stream(
                        rs.stream.color, self._color_shape[0], self._color_shape[1], rs.format.bgr8, self._fps
                    )
                    config.enable_stream(rs.stream.accel)
                    config.enable_stream(rs.stream.gyro)
                    profile = self._pipeline.start(config)
                    is_d435i = True
                except RuntimeError:
                    config = rs.config()
                    config.enable_stream(
                        rs.stream.depth, self._depth_shape[0], self._depth_shape[1], rs.format.z16, self._fps
                    )
                    config.enable_stream(
                        rs.stream.color, self._color_shape[0], self._color_shape[1], rs.format.bgr8, self._fps
                    )
                    profile = self._pipeline.start(config)
                    is_d435i = False

                # Set depth units
                depth_sensor = profile.get_device().first_depth_sensor()
                depth_sensor.set_option(rs.option.depth_units, self.depth_units)

                # Publish intrinsics
                rs_intrinsics = profile.get_stream(rs.stream.color).as_video_stream_profile().get_intrinsics()
                intrinsics = IntrinsicsStreamContract(
                    width=rs_intrinsics.width,
                    height=rs_intrinsics.height,
                    ppx=rs_intrinsics.ppx,
                    ppy=rs_intrinsics.ppy,
                    fx=rs_intrinsics.fx,
                    fy=rs_intrinsics.fy
                )
                self._element.entry_write(
                    IntrinsicsStreamContract.STREAM_NAME,
                    intrinsics.to_dict(),
                    serialize=IntrinsicsStreamContract.SERIALIZE,
                    maxlen=self._fps
                )

                try:
                    self._status_lock.acquire()
                    self._status_is_running = True
                finally:
                    self._status_lock.release()

                self._element.log(LogLevel.INFO, "Realsense connected and streaming")
                while True:
                    start_time = time.time()

                    frames = self._pipeline.wait_for_frames()
                    aligned_frames = self._align.process(frames)
                    depth_frame = aligned_frames.get_depth_frame()
                    color_frame = aligned_frames.get_color_frame()

                    # Validate that frames are valid
                    if not depth_frame or not color_frame:
                        continue

                    # Generate realsense pointcloud
                    self._rs_pc.map_to(color_frame)
                    points = self._rs_pc.calculate(depth_frame)

                    # Convert data to numpy arrays
                    depth_image = np.asanyarray(depth_frame.get_data())
                    color_image = np.asanyarray(color_frame.get_data())
                    vertices = np.asanyarray(points.get_vertices())

                    vertices = vertices.view(np.float32).reshape(vertices.shape + (-1,))

                    if self._rotation is not None:
                        depth_image = np.rot90(depth_image, k=self._rotation / 90)
                        color_image = np.rot90(color_image, k=self._rotation / 90)
                        # TODO: Apply rotation to pointcloud

                    _, color_serialized = cv2.imencode(".tif", color_image)
                    _, depth_serialized = cv2.imencode(".tif", depth_image)
                    _, pc_serialized = cv2.imencode(".tif", vertices)

                    if is_d435i:
                        accel = frames[2].as_motion_frame().get_motion_data()
                        gyro = frames[3].as_motion_frame().get_motion_data()
                        accel_data = AccelStreamContract(x=accel.x, y=accel.y, z=accel.z)
                        gyro_data = GyroStreamContract(x=gyro.x, y=gyro.y, z=gyro.z)
                        self._element.entry_write(
                            AccelStreamContract.STREAM_NAME,
                            accel_data.to_dict(),
                            serialize=AccelStreamContract.SERIALIZE,
                            maxlen=self._fps
                        )
                        self._element.entry_write(
                            GyroStreamContract.STREAM_NAME,
                            gyro_data.to_dict(),
                            serialize=GyroStreamContract.SERIALIZE,
                            maxlen=self._fps
                        )

                    color_contract = ColorStreamContract(data=color_serialized.tobytes())
                    depth_contract = DepthStreamContract(data=depth_serialized.tobytes())
                    pc_contract = PointCloudStreamContract(data=pc_serialized.tobytes())
                    self._element.entry_write(
                        ColorStreamContract.STREAM_NAME,
                        color_contract.to_dict(),
                        serialize=ColorStreamContract.SERIALIZE,
                        maxlen=self._fps
                    )
                    self._element.entry_write(
                        DepthStreamContract.STREAM_NAME,
                        depth_contract.to_dict(),
                        serialize=DepthStreamContract.SERIALIZE,
                        maxlen=self._fps
                    )
                    self._element.entry_write(
                        PointCloudStreamContract.STREAM_NAME,
                        pc_contract.to_dict(),
                        serialize=PointCloudStreamContract.SERIALIZE,
                        maxlen=self._fps
                    )

                    # Load transform from file if the file exists
                    # and has been modified since we last checked
                    if os.path.exists(self._transform_file_path):
                        transform_last_modified = os.stat(self._transform_file_path).st_mtime
                        if transform_last_modified > self._transform_last_loaded:
                            try:
                                self._transform = self.load_transform_from_file(self._transform_file_path)
                                self._transform_last_loaded = time.time()
                            except Exception as e:
                                self._element.log(LogLevel.ERR, str(e))
                    self._element.entry_write(
                        TransformStreamContract.STREAM_NAME,
                        self._transform.to_dict(),
                        serialize=TransformStreamContract.SERIALIZE,
                        maxlen=self._fps
                    )

                    time.sleep(max(1 / self._fps - (time.time() - start_time), 0))

            except:
                self._element.log(LogLevel.INFO, "Camera loop threw exception: %s" % (sys.exc_info()[1]))
            finally:
                # If camera fails to init or crashes, update status and retry connection
                try:
                    self._status_lock.acquire()
                    self._status_is_running = False
                finally:
                    self._status_lock.release()

                try:
                    self._pipeline.stop()
                except:
                    pass
                time.sleep(self._retry_delay)
Example #11
    def __init__(self):
        self.element = Element(f"atom-cli_{uname().nodename}_{uuid4().hex}")
        self.indent = 2
        self.style = Style.from_dict({
            "logo_color": "#6039C8",
        })
        self.session = PromptSession(style=self.style)
        self.print_atom_os_logo()
        self.serialization = "msgpack"
        self.cmd_map = {
            "help": self.cmd_help,
            "list": self.cmd_list,
            "records": self.cmd_records,
            "command": self.cmd_command,
            "read": self.cmd_read,
            "exit": self.cmd_exit,
            "serialization": self.cmd_serialization,
        }
        self.usage = {
            "cmd_help": cleandoc("""
                Displays available commands and shows usage for commands.

                Usage:
                  help [<command>]"""),

            "cmd_list": cleandoc("""
                Displays available elements, streams, or commands.
                Can filter streams and commands based on element.

                Usage:
                  list elements
                  list streams [<element>]
                  list commands [<element>]"""),

            "cmd_records": cleandoc("""
                Displays log records or command and response records.
                Can filter records from the last N seconds or from certain elements.

                Usage:
                  records log [<last_N_seconds>] [<element>...]
                  records cmdres [<last_N_seconds>] <element>..."""),

            "cmd_command": cleandoc("""
                Sends a command to an element and displays the response.

                Usage:
                  command <element> <element_command> [<data>]"""),

            "cmd_read": cleandoc("""
                Displays the entries of an element's stream.
                Can provide a rate to print the entries for ease of reading.

                Usage:
                  read <element> <stream> [<rate_hz>]"""),

            "cmd_exit": cleandoc("""
                Exits the atom-cli tool.
                Can also use the shortcut CTRL+D.

                Usage:
                  exit"""),

            "cmd_serialization": cleandoc("""
                Sets the serialization/deserialization setting to use msgpack,
                Apache Arrow, or no (de)serialization. Defaults to msgpack serialization.
                This setting is overridden by deserialization keys received in the stream.

                Usage:
                  serialization (msgpack | arrow | none)"""),
        }
Example #12
class AtomCLI:

    def __init__(self):
        self.element = Element(f"atom-cli_{uname().nodename}_{uuid4().hex}")
        self.indent = 2
        self.style = Style.from_dict({
            "logo_color": "#6039C8",
        })
        self.session = PromptSession(style=self.style)
        self.print_atom_os_logo()
        self.serialization = "msgpack"
        self.cmd_map = {
            "help": self.cmd_help,
            "list": self.cmd_list,
            "records": self.cmd_records,
            "command": self.cmd_command,
            "read": self.cmd_read,
            "exit": self.cmd_exit,
            "serialization": self.cmd_serialization,
        }
        self.usage = {
            "cmd_help": cleandoc("""
                Displays available commands and shows usage for commands.

                Usage:
                  help [<command>]"""),

            "cmd_list": cleandoc("""
                Displays available elements, streams, or commands.
                Can filter streams and commands based on element.

                Usage:
                  list elements
                  list streams [<element>]
                  list commands [<element>]"""),

            "cmd_records": cleandoc("""
                Displays log records or command and response records.
                Can filter records from the last N seconds or from certain elements.

                Usage:
                  records log [<last_N_seconds>] [<element>...]
                  records cmdres [<last_N_seconds>] <element>..."""),

            "cmd_command": cleandoc("""
                Sends a command to an element and displays the response.

                Usage:
                  command <element> <element_command> [<data>]"""),

            "cmd_read": cleandoc("""
                Displays the entries of an element's stream.
                Can provide a rate to print the entries for ease of reading.

                Usage:
                  read <element> <stream> [<rate_hz>]"""),

            "cmd_exit": cleandoc("""
                Exits the atom-cli tool.
                Can also use the shortcut CTRL+D.

                Usage:
                  exit"""),

            "cmd_serialization": cleandoc("""
                Sets the serialization/deserialization setting to use msgpack,
                Apache Arrow, or no (de)serialization. Defaults to msgpack serialization.
                This setting is overridden by deserialization keys received in the stream.

                Usage:
                  serialization (msgpack | arrow | none)"""),
        }

    def run(self):
        """
        The main loop of the CLI.
        Reads the user input, verifies the command exists and calls the command.
        """
        while True:
            try:
                inp = self.session.prompt(
                    "\n> ", auto_suggest=AutoSuggestFromHistory()).split(" ")
                if not inp:
                    continue
                command, args = inp[0], inp[1:]
                if command not in self.cmd_map.keys():
                    print("Invalid command. Type 'help' for valid commands.")
                else:
                    self.cmd_map[command](*args)
            # Handle CTRL+C so user can break loops without exiting
            except KeyboardInterrupt:
                pass
            # Exit on CTRL+D
            except EOFError:
                self.cmd_exit()
            except Exception as e:
                print(str(type(e)) + " " + str(e))

    def print_atom_os_logo(self):
        f = Figlet(font="slant")
        logo = f.renderText("ATOM OS")
        print(HTML(f"<logo_color>{logo}</logo_color>"), style=self.style)

    def format_record(self, record):
        """
        Takes a record out of Redis, decodes the keys and values (if possible)
        and returns a formatted json string sorted by keys.
        """
        formatted_record = {}
        for k, v in record.items():
            if type(k) is bytes:
                k = k.decode()
            if not self.serialization:
                try:
                    v = v.decode()
                except:
                    v = str(v)
            formatted_record[k] = v

        sorted_record = {k: v for k, v in sorted(
            formatted_record.items(), key=lambda x: x[0])}
        try:
            ret = json.dumps(sorted_record, indent=self.indent)
        except TypeError as te:
            ret = sorted_record
        finally:
            return ret

    def cmd_help(self, *args):
        usage = self.usage["cmd_help"]
        if len(args) > 1:
            print(usage)
            print("\nToo many arguments to 'help'.")
            return
        if args:
            # Prints the usage of the command
            if args[0] in self.cmd_map.keys():
                print(self.usage[f"cmd_{args[0]}"])
            else:
                print(f"Command {args[0]} does not exist.")
        else:
            print("Try 'help <command>' for usage on a command")
            print("Available commands:")
            for command in self.cmd_map.keys():
                print(f"  {command}")

    def cmd_list(self, *args):
        usage = self.usage["cmd_list"]
        mode_map = {
            "elements": self.element.get_all_elements,
            "streams": self.element.get_all_streams,
            "commands": self.element.get_all_commands
        }
        if not args:
            print(usage)
            print("\n'list' must have an argument.")
            return
        mode = args[0]
        if mode not in mode_map.keys():
            print(usage)
            print("\nInvalid argument to 'list'.")
            return
        if len(args) > 1 and mode == "elements":
            print(usage)
            print("\nInvalid number of arguments for command 'list elements'.")
            return
        if len(args) > 2:
            print(usage)
            print("\n'list' takes at most 2 arguments.")
            return
        items = mode_map[mode](*args[1:])
        if not items:
            print(f"No {mode} exist.")
            return
        for item in items:
            print(item)

    def cmd_records(self, *args):
        usage = self.usage["cmd_records"]
        if not args:
            print(usage)
            print("\n'records' must have an argument.")
            return
        mode = args[0]

        # Check for start time
        if len(args) > 1 and args[1].isdigit():
            ms = int(args[1]) * 1000
            start_time = str(int(self.element._get_redis_timestamp()) - ms)
            elements = set(args[2:])
        # If no start time, go from the very beginning
        else:
            start_time = "0"
            elements = set(args[1:])

        if mode == "log":
            records = self.mode_log(start_time, elements)
        elif mode == "cmdres":
            if not elements:
                print(usage)
                print(
                    "\nMust provide elements from which to get command and response streams.")
                return
            records = self.mode_cmdres(start_time, elements)
        else:
            print(usage)
            print("\nInvalid argument to 'records'.")
            return

        if not records:
            print("No records.")
            return
        for record in records:
            print(self.format_record(record))

    def mode_log(self, start_time, elements):
        """
        Reads the logs from Atom's log stream.

        Args:
            start_time (str): The time from which to start reading logs.
            elements (list): The elements on which to filter the logs for.
        """
        records = []
        all_records = self.element.entry_read_since(
            None, "log", start_time, serialization=None)
        for record in all_records:
            if not elements or record["element"].decode() in elements:
                # Decode only the values that are still bytes
                record = {key: (value if isinstance(value, str) else value.decode())
                          for key, value in record.items()}
                records.append(record)
        return records

    def mode_cmdres(self, start_time, elements):
        """
        Reads the command and response records from the provided elements.
        Args:
            start_time (str): The time from which to start reading logs.
            elements (list): The elements to get the command and response records from.
        """
        streams, records = [], []
        for element in elements:
            streams.append(self.element._make_response_id(element))
            streams.append(self.element._make_command_id(element))
        for stream in streams:
            cur_records = self.element.entry_read_since(
                None, stream, start_time, serialization=None)
            for record in cur_records:
                for key, value in record.items():
                    try:
                        if not isinstance(value, str):
                            value = value.decode()
                    except:
                        try:
                            value = ser.deserialize(value, method=self.serialization)
                        except:
                            pass

                    finally:
                        record[key] = value
                record["type"], record["element"] = stream.split(":")
                records.append(record)
        return sorted(records, key=lambda x: (x["id"], x["type"]))

    def cmd_command(self, *args):
        usage = self.usage["cmd_command"]
        if len(args) < 2:
            print(usage)
            print("\nToo few arguments.")
            return
        element_name = args[0]
        command_name = args[1]
        if len(args) >= 3:
            data = str(" ".join(args[2:]))
            if self.serialization:
                try:
                    data = json.loads(data)
                except:
                    print("Received improperly formatted data!")
                    return
        else:
            data = ""
        resp = self.element.command_send(element_name,
                                         command_name,
                                         data,
                                         serialize=(self.serialization is not None),
                                         deserialize=(self.serialization is not None),
                                         serialization=self.serialization)  # shouldn't be used if it's None
        print(self.format_record(resp))

    def cmd_read(self, *args):
        usage = self.usage["cmd_read"]
        if len(args) < 2:
            print(usage)
            print("\nToo few arguments.")
            return
        if len(args) > 3:
            print(usage)
            print("\nToo many arguments.")
            return
        if len(args) == 3:
            try:
                rate = float(args[2])
                if rate < 0:
                    raise ValueError()
            except ValueError:
                print("rate must be a float greater than or equal to 0.")
                return
        else:
            rate = None
        element_name, stream_name = args[:2]

        last_timestamp = None
        while True:
            start_time = time.time()
            entries = self.element.entry_read_n(element_name,
                                                stream_name,
                                                1,
                                                deserialize=(self.serialization is not None),
                                                serialization=self.serialization)  # shouldn't be used if it's None
            if not entries:
                print(f"No data from {element_name} {stream_name}.")
                return
            entry = entries[0]
            timestamp = entry["id"]
            # Only print the entry if it is different from the previous one
            if timestamp != last_timestamp:
                last_timestamp = timestamp
                print(self.format_record(entry))
            if rate:
                time.sleep(max(1 / rate - (time.time() - start_time), 0))

    def cmd_serialization(self, *args):
        usage = self.usage["cmd_serialization"]
        if (len(args) != 1):
            print(usage)
            print(f"\nPass one argument: {ser.Serializations.print_values()}.")
            return

        # Otherwise try to get the new setting
        if ser.is_valid_serialization(args[0].lower()):
            self.serialization = args[0].lower() if args[0].lower() != "none" else None
        else:
            print(f"\nArgument must be one of {ser.Serializations.print_values()}.")

        print("Current serialization status is {}".format(self.serialization))

    def cmd_exit(*args):
        print("Exiting.")
        sys.exit()
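The listing stops at the class definition; a minimal entry point for the CLI, assuming no additional setup is required:

if __name__ == "__main__":
    cli = AtomCLI()
    cli.run()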
Example #13
            pos_map[cur_pos] = self.atombot
            return_str = " ".join(pos_map)
            return return_str
        finally:
            self.bot_lock.release()

    def is_healthy(self):
        # This is an example health-check, which can be used to tell other elements that depend on you
        # whether you are ready to receive commands or not. Any non-zero error code means you are unhealthy.
        return Response(err_code=0, err_str="Everything is good")


if __name__ == "__main__":
    print("Launching...")
    # Create our element and call it "atombot"
    element = Element("atombot")

    # Instantiate our AtomBot class
    atombot = AtomBot()

    # We add a healthcheck to our atombot element.
    # This is optional. If you don't do this, atombot is assumed healthy as soon as its command_loop executes
    element.healthcheck_set(atombot.is_healthy)

    # This registers the relevant AtomBot methods as a command in the atom system
    # We set the timeout so the caller will know how long to wait for the command to execute
    element.command_add("move_left",
                        atombot.move_left,
                        timeout=50,
                        deserialize=True)
    element.command_add("move_right",
Example #14
            buff = "{},".format(x_val)

            # And add each item from the iterable
            try:
                for v in val:
                    buff += "{},".format(v)
            except:
                buff += "{}".format(val)

            # Finish off with a newline and write to file
            buff += "\n"
            files[key].write(buff)

    # And note the success
    return Response("Success", serialize=True)


if __name__ == '__main__':
    elem = Element("record")
    elem.command_add("start", start_recording, timeout=1000, deserialize=True)
    elem.command_add("stop", stop_recording, timeout=1000, deserialize=True)
    elem.command_add("wait", wait_recording, timeout=60000, deserialize=True)
    elem.command_add("list", list_recordings, timeout=1000)
    elem.command_add("get", get_recording, timeout=1000, deserialize=True)
    elem.command_add("plot", plot_recording, timeout=1000000, deserialize=True)
    elem.command_add("csv", csv_recording, timeout=10000, deserialize=True)

    # Want to launch the plot thread s.t. our plot API can return quickly

    elem.command_loop()
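The comment near the end of this example notes that the plot work should run in its own thread so the "plot" command can return quickly; one possible sketch follows. plot_worker is a stand-in, since the real plotting code is not shown in this listing.

from threading import Thread

def plot_worker():
    # Stand-in for the long-running plotting loop; not shown in the source.
    pass

# Launched before elem.command_loop() so the "plot" handler can hand work off and return quickly.
plot_thread = Thread(target=plot_worker, daemon=True)
plot_thread.start()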
Example #15
def record_fn(name, n_entries, n_sec, perm, element, stream):
    '''
    Mainloop for a recording thread. Creates a new
    element with the proper name and listens on and
    records the stream until we're told to stop
    '''
    global active_recordings

    # Make an element from the name
    record_elem = Element("record_" + name)

    # Open the file for the recording
    filename = os.path.join(PERM_RECORDING_LOC if perm else TEMP_RECORDING_LOC,
                            name + RECORDING_EXTENSION)
    try:
        record_file = open(filename, 'wb')
    except:
        record_elem.log(LogLevel.ERR,
                        "Unable to open file {}".format(filename))
        del active_recordings[name]
        return

    # At the outer loop, we want to loop until we've been cancelled
    last_id = "$"
    intervals = 0
    entries_read = 0
    while name in active_recordings:

        # Read the data
        data = record_elem.entry_read_since(element,
                                            stream,
                                            last_id,
                                            n=n_entries,
                                            block=BLOCK_MS)

        # If we got no data, then we should finish up
        if len(data) == 0:
            record_elem.log(
                LogLevel.ERR,
                "Recording {}: no data after {} entries read!".format(
                    name, entries_read))
            break

        entries_read += len(data)

        # We're going to pack up each entry into a msgpack item and
        #   then write it to the file. If it's already msgpack'd
        #   that's totally fine, this will just pack up the keys and ID
        for entry in data:
            packed_data = msgpack.packb(entry, use_bin_type=True)

            # Write the packed data to file
            record_file.write(packed_data)

        # If n_entries is not none then we want to subtract
        #   off the number of entries left and perhaps break out
        if n_entries is not None:
            n_entries -= len(data)
            if (n_entries <= 0):
                break

        # Otherwise see if we've recorded for longer than our
        #   elapsed time
        else:
            intervals += 1
            if (intervals * POLL_INTERVAL) >= n_sec:
                break

        # If we got here, we should sleep for the interval before
        #   making the next call
        time.sleep(POLL_INTERVAL)

        # And update the last ID
        last_id = data[-1]["id"]

    # Once we're out of here we want to note that we're no longer
    #   active in the global system. It might be that someone else already
    #   popped it out in the "stop" command
    if name in active_recordings:
        thread = active_recordings.pop(name)

    # And we want to close the file
    record_file.close()

    # And log that we completed the recording
    record_elem.log(
        LogLevel.INFO, "Finished recording {} with {} entries read".format(
            name, entries_read))
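Since each entry is written back-to-back with msgpack.packb, a recording file can be read back with a streaming unpacker. A minimal sketch; the path and extension used here are illustrative, not the values of PERM_RECORDING_LOC or RECORDING_EXTENSION.

import msgpack

def read_recording(path):
    """Stream the concatenated msgpack entries back out of a recording file."""
    entries = []
    with open(path, "rb") as f:
        for entry in msgpack.Unpacker(f, raw=False):
            entries.append(entry)
    return entries

entries = read_recording("/recordings/example.rec")  # hypothetical location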
Example #16
from atom import Element
from atom.messages import Response
import time
import math
import pdb

element = Element("python_demo")

# element name, command, data
res = element.command_send("robot_api", "control", True)
pdb.set_trace()
Example #17
    run_element = Element("run_demo")

    res = run_element.command_send("robot_api",
                                   "home", {
                                       "v": VEL,
                                       "a": ACCEL
                                   },
                                   serialize=True)
    if (res['err_code'] != 0):
        return Response(err_code=1,
                        err_str="Failed to move to home position",
                        serialize=True)

    # turn off control
    set_control(run_element, False)

    return Response("Success", serialize=True)


if __name__ == '__main__':
    '''
    Mainloop, wait for the command to run the grab and then do
    the grab
    '''

    element = Element("demo")
    element.command_add("run", run_demo, timeout=60000)
    element.command_add("driving_position", driving_position, timeout=10000)
    element.command_add("home", home, timeout=10000)
    element.command_loop()
Example #18
class SDMaskRCNNEvaluator:
    def __init__(self,
                 mode="both",
                 input_size=512,
                 scaling_factor=2,
                 config_path="sd-maskrcnn/cfg/benchmark.yaml"):
        self.element = Element("instance-segmentation")
        self.input_size = input_size
        self.scaling_factor = scaling_factor
        self.config_path = config_path
        self.mode = mode
        # Streaming of masks is disabled by default to prevent consumption of resources
        self.stream_enabled = False

        config = tf.ConfigProto()
        config.gpu_options.per_process_gpu_memory_fraction = 0.5
        config.gpu_options.visible_device_list = "0"
        set_session(tf.Session(config=config))

        self.set_mode(b"both")
        # Initiate tensorflow graph before running threads
        self.get_masks()
        self.element.command_add("segment", self.segment, 10000)
        self.element.command_add("get_mode", self.get_mode, 100)
        self.element.command_add("set_mode", self.set_mode, 10000)
        self.element.command_add("stream", self.set_stream, 100)
        t = Thread(target=self.element.command_loop, daemon=True)
        t.start()
        self.publish_segments()

    def get_mode(self, _):
        """
        Returns the current mode of the algorithm (both or depth).
        """
        return Response(self.mode)

    def set_mode(self, data):
        """
        Sets the mode of the algorithm and loads the corresponding weights.
        'both' means that the algorithm is considering grayscale and depth data.
        'depth' means that the algorithm only considers depth data.
        """
        mode = data.decode().strip().lower()
        if mode not in MODES:
            return Response(f"Invalid mode {mode}")
        self.mode = mode
        config = YamlConfig(self.config_path)
        inference_config = MaskConfig(config['model']['settings'])
        inference_config.GPU_COUNT = 1
        inference_config.IMAGES_PER_GPU = 1

        model_path = MODEL_PATHS[self.mode]
        model_dir, _ = os.path.split(model_path)
        self.model = modellib.MaskRCNN(mode=config['model']['mode'],
                                       config=inference_config,
                                       model_dir=model_dir)
        self.model.load_weights(model_path, by_name=True)
        self.element.log(LogLevel.INFO, f"Loaded weights from {model_path}")
        return Response(f"Mode switched to {self.mode}")

    def set_stream(self, data):
        """
        Sets streaming of segmented masks to true or false.
        """
        data = data.decode().strip().lower()
        if data == "true":
            self.stream_enabled = True
        elif data == "false":
            self.stream_enabled = False
        else:
            return Response(f"Expected 'true' or 'false', got '{data}'.")
        return Response(f"Streaming set to {self.stream_enabled}")

    def inpaint(self, img, missing_value=0):
        """
        Fills the missing values of the depth data.
        """
        # cv2 inpainting doesn't handle the border properly
        # https://stackoverflow.com/questions/25974033/inpainting-depth-map-still-a-black-image-border
        img = cv2.copyMakeBorder(img, 1, 1, 1, 1, cv2.BORDER_DEFAULT)
        mask = (img == missing_value).astype(np.uint8)

        # Scale to keep as float, but has to be in bounds -1:1 to keep opencv happy.
        scale = np.abs(img).max()
        img = img.astype(
            np.float32) / scale  # Has to be float32, 64 not supported.
        img = cv2.inpaint(img, mask, 1, cv2.INPAINT_NS)

        # Back to original size and value range.
        img = img[1:-1, 1:-1]
        img = img * scale
        return img

    def normalize(self, img, max_dist=1000):
        """
        Scales the range of the data to be in 8-bit.
        Also shifts the values so that maximum is 255.
        """
        img = np.clip(img / max_dist, 0, 1) * 255
        img = np.clip(img + (255 - img.max()), 0, 255)
        return img.astype(np.uint8)

    def scale_and_square(self, img, scaling_factor, size):
        """
        Scales the image by scaling_factor and creates a border around the image to match size.
        Reducing the size of the image tends to improve the output of the model.
        """
        img = cv2.resize(img, (int(img.shape[1] / scaling_factor),
                               int(img.shape[0] / scaling_factor)),
                         interpolation=cv2.INTER_NEAREST)
        v_pad, h_pad = (size - img.shape[0]) // 2, (size - img.shape[1]) // 2
        img = cv2.copyMakeBorder(img, v_pad, v_pad, h_pad, h_pad,
                                 cv2.BORDER_REPLICATE)
        return img

    def unscale(self, results, scaling_factor, size):
        """
        Takes the results of the model and transforms them back into the original dimensions of the input image.
        """
        masks = results["masks"].astype(np.uint8)
        masks = cv2.resize(masks, (int(masks.shape[1] * scaling_factor),
                                   int(masks.shape[0] * scaling_factor)),
                           interpolation=cv2.INTER_NEAREST)
        v_pad, h_pad = (masks.shape[0] - size[0]) // 2, (masks.shape[1] -
                                                         size[1]) // 2
        masks = masks[v_pad:-v_pad, h_pad:-h_pad]

        rois = results["rois"] * scaling_factor
        for roi in rois:
            roi[0] = min(max(0, roi[0] - v_pad), size[0])
            roi[1] = min(max(0, roi[1] - h_pad), size[1])
            roi[2] = min(max(0, roi[2] - v_pad), size[0])
            roi[3] = min(max(0, roi[3] - h_pad), size[1])
        return masks, rois

    def publish_segments(self):
        """
        Publishes visualization of segmentation masks continuously.
        """
        self.colors = []

        for i in range(NUM_OF_COLORS):
            self.colors.append((np.random.rand(3) * 255).astype(int))

        while True:
            if not self.stream_enabled:
                time.sleep(1 / PUBLISH_RATE)
                continue

            start_time = time.time()
            scores, masks, rois, color_img = self.get_masks()
            masked_img = np.zeros(color_img.shape).astype("uint8")
            contour_img = np.zeros(color_img.shape).astype("uint8")

            if masks is not None and scores.size != 0:
                number_of_masks = masks.shape[-1]
                # Calculate the areas of masks
                mask_areas = []
                for i in range(number_of_masks):
                    width = np.abs(rois[i][0] - rois[i][2])
                    height = np.abs(rois[i][1] - rois[i][3])
                    mask_area = width * height
                    mask_areas.append(mask_area)

                np_mask_areas = np.array(mask_areas)
                mask_indices = np.argsort(np_mask_areas)
                # Add masks in order of their areas.
                for i in mask_indices:
                    if (scores[i] > SEGMENT_SCORE):
                        indices = np.where(masks[:, :, i] == 1)
                        masked_img[indices[0], indices[1], :] = self.colors[i]

                # Smoothen masks
                masked_img = cv2.medianBlur(masked_img, 15)
                # Find contours and draw boundaries.
                gray_image = cv2.cvtColor(masked_img, cv2.COLOR_BGR2GRAY)
                ret, thresh = cv2.threshold(gray_image, 50, 255,
                                            cv2.THRESH_BINARY)
                contours, hierarchy = cv2.findContours(thresh,
                                                       cv2.RETR_EXTERNAL,
                                                       cv2.CHAIN_APPROX_NONE)
                # Draw contours:
                for contour in contours:
                    area = cv2.contourArea(contour)
                    cv2.drawContours(contour_img, contour, -1, (255, 255, 255),
                                     5)

                masked_img = cv2.addWeighted(color_img, 0.6, masked_img, 0.4,
                                             0)
                masked_img = cv2.bitwise_or(masked_img, contour_img)

                _, color_serialized = cv2.imencode(".tif", masked_img)
                self.element.entry_write("color_mask",
                                         {"data": color_serialized.tobytes()},
                                         maxlen=30)

            time.sleep(max(0, (1 / PUBLISH_RATE) - (time.time() - start_time)))

    def get_masks(self):
        """
        Gets the latest data from the realsense, preprocesses it and returns the 
        segmentation masks, bounding boxes, and scores for each detected object.
        """
        color_data = self.element.entry_read_n("realsense", "color", 1)
        depth_data = self.element.entry_read_n("realsense", "depth", 1)
        try:
            color_data = color_data[0]["data"]
            depth_data = depth_data[0]["data"]
        except (IndexError, KeyError):
            raise Exception(
                "Could not get data. Is the realsense element running?")

        depth_img = cv2.imdecode(np.frombuffer(depth_data, dtype=np.uint16),
                                 -1)
        original_size = depth_img.shape[:2]
        depth_img = self.scale_and_square(depth_img, self.scaling_factor,
                                          self.input_size)
        depth_img = self.inpaint(depth_img)
        depth_img = self.normalize(depth_img)

        if self.mode == "both":
            gray_img = cv2.imdecode(np.frombuffer(color_data, dtype=np.uint16),
                                    0)
            color_img = cv2.imdecode(
                np.frombuffer(color_data, dtype=np.uint16), 1)
            gray_img = self.scale_and_square(gray_img, self.scaling_factor,
                                             self.input_size)
            input_img = np.zeros((self.input_size, self.input_size, 3))
            input_img[..., 0] = gray_img
            input_img[..., 1] = depth_img
            input_img[..., 2] = depth_img
        else:
            input_img = np.stack((depth_img, ) * 3, axis=-1)

        # Get results and unscale
        results = self.model.detect([input_img], verbose=0)[0]
        masks, rois = self.unscale(results, self.scaling_factor, original_size)

        if masks.ndim < 2 or results["scores"].size == 0:
            masks = None
            results["scores"] = None
        elif masks.ndim == 2:
            masks = np.expand_dims(masks, axis=-1)

        return results["scores"], masks, rois, color_img

    def segment(self, _):
        """
        Command for getting the latest segmentation masks and returning the results.
        """
        scores, masks, rois, color_img = self.get_masks()
        # Encoded masks in TIF format and package everything in dictionary
        encoded_masks = []

        if masks is not None and scores is not None:
            for i in range(masks.shape[-1]):
                _, encoded_mask = cv2.imencode(".tif", masks[..., i])
                encoded_masks.append(encoded_mask.tobytes())
            response_data = {
                "rois": rois.tolist(),
                "scores": scores.tolist(),
                "masks": encoded_masks
            }

        else:
            response_data = {"rois": [], "scores": [], "masks": []}

        return Response(response_data, serialize=True)
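A caller-side sketch for the "segment" command, using the element and command names registered in __init__ above; the caller element name is hypothetical, the response is assumed to expose the handler's payload under a "data" key, and the TIF-encoded masks are decoded back into numpy arrays.

import cv2
import numpy as np
from atom import Element

caller = Element("segmentation_caller")  # hypothetical element name
res = caller.command_send("instance-segmentation", "segment", "", deserialize=True)
if res["err_code"] == 0:
    data = res["data"]  # assumed response layout
    masks = [cv2.imdecode(np.frombuffer(m, dtype=np.uint8), -1)
             for m in data["masks"]]
    print("Got {} masks with scores {}".format(len(masks), data["scores"]))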
Example #19
class Picamera:
    
    def __init__(self, element_name, width, height, fps, retry_delay):
        self._width = width
        self._height = height
        self._fps = fps
        self._retry_delay = retry_delay

        self._status_is_running = False
        self._status_lock = Lock()
        
        # Init element
        self._element = Element(element_name)
        self._element.healthcheck_set(self.is_healthy)
        #self._element.command_add(command_name, command_func_ptr, timeout, serialize)
        
        # Run command loop
        thread = Thread(target=self._element.command_loop, daemon=True)
        thread.start()
    
    def is_healthy(self):
        # Reports whether the camera is connected or not
        try:
            self._status_lock.acquire()
            if self._status_is_running:
                return Response(err_code=0, err_str="Camera is good")
            else:
                return Response(err_code=1, err_str="Camera is not good")
        finally:
            self._status_lock.release()

    def run_camera_stream(self):
        while True:
            try:
                # try to open up camera
                self._element.log(LogLevel.INFO, "Opening PiCamera")
                self._camera = PiCamera()
                self._color_array = PiRGBArray(self._camera)
                
                # set camera configs
                self._camera.resolution = (self._width, self._height)
                self._camera.framerate = self._fps
                
                # allow the camera to warm up
                time.sleep(.5)
                
                try:
                    self._status_lock.acquire()
                    self._status_is_running = True
                finally:
                    self._status_lock.release()
                
                self._element.log(LogLevel.INFO, "Picamera connected and streaming")
                
                while True:
                    start_time = time.time()
                    
                    self._camera.capture(self._color_array, format = 'bgr')
                    color_image = self._color_array.array
                    
                    #do some rotation here
                    
                    _, color_serialized = cv2.imencode(".tif", color_image)
                    
                    color_contract = ColorStreamContract(data=color_serialized.tobytes())
                    self._element.entry_write(
                            ColorStreamContract.STREAM_NAME,
                            color_contract.to_dict(),
                            serialize=ColorStreamContract.SERIALIZE,
                            maxlen=self._fps
                    )
                    time.sleep(max(1 / self._fps - (time.time() - start_time),0))
                    self._color_array.truncate(0)
            except:
                self._element.log(LogLevel.INFO, "Camera threw exception: %s" % (sys.exc_info()[1]))

            finally:
                try:
                    self._status_lock.acquire()
                    self._status_is_running = False
                    self._camera.close()
                finally: 
                    self._status_lock.release()

                time.sleep(self._retry_delay)
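A hedged sketch of a consumer reading the color stream published above; the consumer element name, the publishing element name, and the value of ColorStreamContract.STREAM_NAME are all assumptions, and the TIF decode mirrors Example #18.

import cv2
import numpy as np
from atom import Element

consumer = Element("picamera_viewer")  # hypothetical element name
# "picamera" and "color" are assumed names for the publishing element and its stream.
entries = consumer.entry_read_n("picamera", "color", 1)
if entries:
    color_image = cv2.imdecode(
        np.frombuffer(entries[0]["data"], dtype=np.uint8), -1)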
Example #20
import cv2
import numpy as np
import os
import sys
import time
from atom import Element
from atom.messages import LogLevel
from PyQt5.QtWidgets import QApplication, QWidget, QComboBox, QLabel, QMainWindow, QToolBar, QPushButton, QSizePolicy
from PyQt5.QtCore import Qt, QThread, pyqtSignal, pyqtSlot
from PyQt5.QtGui import QPixmap, QImage
from qimage2ndarray import array2qimage

LOGO_PATH = "assets/logo.png"

element = Element("stream-viewer")
stream = ""


class StreamThread(QThread):
    change_pixmap = pyqtSignal(QImage)
    max_size = 8192
    hz = 30
    img = None

    def run(self):
        """
        Gets the latest data from the current stream and sends it to be displayed on the main window.
        """
        global stream
        last_set = time.time()
        # tracks whether streaming was active in the last iteration of the while loop below