def driving_position(data):
    """Move the robot into its driving position."""
    # Element used to talk to the robot API
    element = Element("run_demo")
    # Enable control before commanding a trajectory
    set_control(element, True)
    joint_targets = [
        ["joint_0", [-1.29638671875]],
        ["joint_1", [140]],
        ["joint_2", [-140]],
        ["joint_3", [0]],
        ["joint_4", [0]],
        ["joint_5", [0]],
    ]
    res = element.command_send(
        "robot_api",
        "trajectory_fastest",
        {"t": joint_targets, "v": VEL, "a": ACCEL},
        serialize=True)
    if res['err_code'] != 0:
        return Response(err_code=1,
                        err_str="Failed to move to driving position",
                        serialize=True)
    return Response("Success", serialize=True)
def run_demo(data):
    """Run the demo: scan for the ball, grab it if found, then go home."""
    element = Element("run_demo")
    # Enable control before moving
    set_control(element, True)

    # Scan each observation point until the ball is located
    result = None
    for (obs_x, obs_y) in OBSERVATION_LIST:
        result = scan_height(element, obs_x, obs_y)
        if result is not None:
            break
    found = result is not None

    # If we found the ball, go ahead and grab it
    if found:
        res = element.command_send(
            "ros", "move_to_pos", {
                "xyz": [result[0] + TRANSFORM_X,
                        result[1] + TRANSFORM_Y,
                        HEIGHT_Z],
                "rpy": [CAMERA_R, CAMERA_P, CAMERA_Y],
                "v": VEL,
                "a": ACCEL
            },
            serialize=True)
        if res['err_code'] != 0:
            return Response(err_code=3,
                            err_str="Failed to move to grasp pos",
                            serialize=True)

    # Always return home afterwards
    res = element.command_send("robot_api", "home", {}, serialize=True)
    if res['err_code'] != 0:
        return Response(err_code=4, err_str="Failed to go home",
                        serialize=True)

    # Report success/failure based on whether the ball was found
    if not found:
        return Response(err_code=5, err_str="Failed to find the ball",
                        serialize=True)
    return Response("Success", serialize=True)
def is_healthy(self):
    """Report whether the camera is connected or not.

    Returns err_code 0 when the camera thread is running, non-zero
    otherwise (including when the status cannot be read at all).
    """
    try:
        # Bug fix: the lock was acquired but never released, which would
        # deadlock every subsequent user of _status_lock. `with` releases
        # it on every exit path.
        with self._status_lock:
            if self._status_is_running:
                return Response(err_code=0, err_str="Camera is good")
            return Response(err_code=1, err_str="Camera is not good")
    except Exception:
        # Bug fix: this path previously returned err_code=0, reporting a
        # healthy camera even though the status could not be read.
        return Response(err_code=2, err_str="Could not reach thread")
def is_healthy(self):
    """ Reports whether the realsense is connected and streaming or not """
    # Snapshot the running flag under the lock, then build the response
    # outside the critical section.
    self._status_lock.acquire()
    try:
        running = self._status_is_running
    finally:
        self._status_lock.release()
    if running:
        return Response(err_code=0, err_str="Realsense online")
    return Response(err_code=1, err_str="Waiting for realsense")
def set_stream(self, data):
    """ Sets streaming of segmented masks to true or false.

    Expects the raw command data to be b"true" or b"false"
    (case-insensitive, surrounding whitespace ignored).
    """
    value = data.decode().strip().lower()
    if value == "true":
        self.stream_enabled = True
    elif value == "false":
        self.stream_enabled = False
    else:
        # Bug fix: the old message printed type(data), which is always
        # 'str' after decoding — report the offending value instead.
        return Response(f"Expected 'true' or 'false', got '{value}'.")
    return Response(f"Streaming set to {self.stream_enabled}")
def wait_recording(data):
    ''' Waits for a recording to finish '''
    global active_recordings
    # The recording must currently be active
    thread = active_recordings.get(data)
    if thread is None:
        return Response(err_code=1,
                        err_str="Recording {} not active".format(data),
                        serialize=True)
    # Block until the recording thread completes, timing the wait
    start = time.time()
    thread.join()
    elapsed = time.time() - start
    return Response("Returned after {} seconds".format(elapsed),
                    serialize=True)
def stop_recording(data):
    ''' Stops a recording. Data should be a msgpack'd string of the name '''
    global active_recordings
    # The recording must currently be active
    if data not in active_recordings:
        return Response(err_code=1,
                        err_str="Recording {} not active".format(data),
                        serialize=True)
    # Remove it from the active set and wait for its thread to finish
    active_recordings.pop(data).join()
    return Response("Success", serialize=True)
def run_transform_estimator(self, *args):
    """ Runs the transform estimation procedure, which saves the
    transform to disk. """
    # Run the external calibration client, capturing its stderr so any
    # failure text can be forwarded in the response.
    proc = subprocess.Popen(self._calibration_client_path,
                            stderr=subprocess.PIPE)
    _, stderr_data = proc.communicate()
    return Response(
        data=CalculateTransformCommand.Response().to_data(),
        err_code=proc.returncode,
        err_str=stderr_data.decode(),
        serialize=CalculateTransformCommand.Response.SERIALIZE)
def set_mode(self, data):
    """ Sets the mode of the algorithm and loads the corresponding weights.
    'both' means that the algorithm is considering grayscale and depth data.
    'depth' means that the algorithm only considers depth data.
    """
    requested = data.decode().strip().lower()
    if requested not in MODES:
        return Response(f"Invalid mode {requested}")
    self.mode = requested

    # Build the inference configuration for a single-image, single-GPU run
    config = YamlConfig(self.config_path)
    inference_config = MaskConfig(config['model']['settings'])
    inference_config.GPU_COUNT = 1
    inference_config.IMAGES_PER_GPU = 1

    # Load the weights that correspond to the selected mode
    model_path = MODEL_PATHS[self.mode]
    model_dir = os.path.split(model_path)[0]
    self.model = modellib.MaskRCNN(mode=config['model']['mode'],
                                   config=inference_config,
                                   model_dir=model_dir)
    self.model.load_weights(model_path, by_name=True)
    self.element.log(LogLevel.INFO, f"Loaded weights from {model_path}")
    return Response(f"Mode switched to {self.mode}")
def home(data):
    ''' Moves the robot home '''
    # Element used to talk to the robot API
    element = Element("run_demo")
    res = element.command_send("robot_api", "home",
                               {"v": VEL, "a": ACCEL},
                               serialize=True)
    if res['err_code'] != 0:
        return Response(err_code=1,
                        err_str="Failed to move to home position",
                        serialize=True)
    # Relinquish control once we're home
    set_control(element, False)
    return Response("Success", serialize=True)
def move_right(self, steps):
    """ Command for moving AtomBot in right for a number of steps.

    Args:
        steps: Number of steps to move.
    """
    # Commands receive raw data; convert to int ourselves
    steps = int(steps)
    if not 0 <= steps <= self.max_pos:
        # Errors are reported via err_code/err_str in the response
        return Response(
            err_code=1,
            err_str=f"Steps must be between 0 and {self.max_pos}")
    # Advance the position under the lock, clamped to the right edge
    with self.pos_lock:
        self.pos = min(self.max_pos, self.pos + steps)
    return Response(data=f"Moved right {steps} steps.", serialize=True)
def transform(self, _):
    """ Command for transforming AtomBot! """
    # Commands always take a single argument, even when unused.
    # Toggle the ascii representation between 'o' and 'O'.
    with self.bot_lock:
        self.atombot = "O" if self.atombot == "o" else "o"
    return Response(data=f"Transformed to {self.atombot}!", serialize=True)
def get_recording(data):
    ''' Returns the contents of a recording. Takes a msgpack serialized
    request object with the following fields:
        name: required recording name
        start: start entry index
        stop: stop entry index
        msgpack: if we should use msgpack to deserialize values, assumed false
    '''
    # _get_recording (shared with plot_recording) returns either an error
    # Response or a list of entries
    result = _get_recording(data)
    if type(result) is list:
        return Response(result, serialize=True)
    return result
def list_recordings(data):
    ''' Returns a list of all recordings in the system '''
    # Collect every file with the recording extension from both storage
    # locations, skipping locations that don't exist.
    recordings = [
        os.path.splitext(fname)[0]
        for folder in (PERM_RECORDING_LOC, TEMP_RECORDING_LOC)
        if os.path.exists(folder)
        for fname in os.listdir(folder)
        if fname.endswith(RECORDING_EXTENSION)
    ]
    return Response(recordings, serialize=True)
def segment(self, _):
    """ Command for getting the latest segmentation masks and returning
    the results.

    Returns a serialized dict with "rois", "scores" and "masks"
    (TIF-encoded bytes per mask layer); all lists are empty when no
    segmentation results are available.
    """
    scores, masks, rois, color_img = self.get_masks()
    # Bug fix: rois is also used below (rois.tolist()) but was not part of
    # the None check, so a missing rois array would crash.
    if masks is None or scores is None or rois is None:
        return Response({"rois": [], "scores": [], "masks": []},
                        serialize=True)
    # Encode each mask layer as TIF so it survives serialization
    encoded_masks = []
    for i in range(masks.shape[-1]):
        _, encoded = cv2.imencode(".tif", masks[..., i])
        encoded_masks.append(encoded.tobytes())
    return Response({
        "rois": rois.tolist(),
        "scores": scores.tolist(),
        "masks": encoded_masks
    }, serialize=True)
def command_cb(self, data):
    # Decode the incoming bytes and hand the command to the worker queue
    command = data.decode('ascii')
    self.q.put(command)
    return Response(data="Success")
def is_healthy(self):
    # Example health-check. Elements that depend on us use this to learn
    # whether we are ready to receive commands; any non-zero error code
    # means unhealthy. This element is always ready.
    return Response(err_code=0, err_str="Everything is good")
def track_ball(run_element, x, y, z, max_iter):
    ''' Runs the tracking loop, nudging the camera until the ball is
    centered under it.

    Args:
        run_element: Element used to command the robot
        x, y, z: Initial observation position
        max_iter: Maximum number of centering iterations

    Returns:
        (x, y, z) tuple of the centered position (z adjusted by the
        ball's c_z) on success, None if the ball was not centered within
        max_iter iterations, or an error Response if a move failed.
    '''
    # Move to the observation point
    res = run_element.command_send("ros", "move_to_pos", {
        "xyz": [x, y, z],
        "rpy": [CAMERA_R, CAMERA_P, CAMERA_Y],
        "v": VEL,
        "a": ACCEL
    }, serialize=True)
    if res['err_code'] != 0:
        return Response(err_code=1,
                        err_str="Failed to move to observation pos",
                        serialize=True)

    # Current camera position, nudged towards the ball each iteration
    curr_x = x
    curr_y = y
    final_bbox = None
    i_num = 0

    # Loop until the ball is centered or we run out of iterations
    while (final_bbox is None) and (i_num < max_iter):
        i_num += 1
        # Give the camera/segmentation time to settle after the move
        time.sleep(SLEEP_TIME)
        # Get the most recent list of bboxes
        data = run_element.entry_read_n("segmentation", "bboxes", 1,
                                        deserialize=True)
        print(data)
        if len(data) == 0:
            print("No objects seen!")
            continue
        # Check for bboxes where width and height are roughly the same
        # (tennis-ball shaped) to rule everything else out
        for bbox in data[0]['data']:
            if (abs(bbox['w'] - bbox['h']) / bbox['w']) > TENNIS_BALL_SIZE_MISMATCH:
                print("Bbox {} not a tennis ball, skipping".format(bbox))
                continue
            # Close enough to centered -- take it
            if max(abs(bbox['c_x']), abs(bbox['c_y'])) < CENTERED_THRESH:
                final_bbox = bbox
                print("final bbox")
                break
            # It's a tennis ball but off-center: move towards it
            curr_y -= bbox['c_x']
            curr_x -= bbox['c_y']
            res = run_element.command_send(
                "ros", "move_to_pos", {
                    "xyz": [curr_x, curr_y, z],
                    "rpy": [CAMERA_R, CAMERA_P, CAMERA_Y],
                    "v": VEL,
                    "a": ACCEL
                },
                serialize=True)
            if res['err_code'] != 0:
                return Response(err_code=2,
                                err_str="Failed to move to observation pos",
                                serialize=True)

    # Bug fix: the old check `i_num != max_iter` discarded a ball that was
    # centered on the final iteration; test final_bbox directly instead.
    if final_bbox is None:
        return None
    return (curr_x, curr_y, z - final_bbox['c_z'])
def _get_recording(data):
    ''' Returns the contents of a recording. Takes a msgpack serialized
    request object with the following fields:
        name: required recording name
        start: start entry index
        stop: stop entry index
        msgpack: if we should use msgpack to deserialize values, assumed false

    Will return a Response() type on error, else a list of all items in
    the recording, each as an (id, {key: value}) tuple.
    '''
    if ("name" not in data) or (type(data["name"]) is not str):
        return Response(err_code=1, err_str="Name is required",
                        serialize=True)
    name = data["name"]

    # Look for the recording in both storage locations
    file = None
    for folder in [PERM_RECORDING_LOC, TEMP_RECORDING_LOC]:
        filename = os.path.join(folder, name + RECORDING_EXTENSION)
        if os.path.exists(filename):
            try:
                # Unbuffered so the msgpack streaming API sees raw bytes
                file = open(filename, 'rb', buffering=0)
                break
            except OSError:
                return Response(
                    err_code=2,
                    err_str="Failed to open file {}".format(filename),
                    serialize=True)

    # Make sure we found the file
    if file is None:
        return Response(err_code=3,
                        err_str="No recording {}".format(name),
                        serialize=True)

    # Optional windowing / deserialization parameters
    start_idx = 0
    stop_idx = -1
    use_msgpack = False
    if ("start" in data) and (type(data["start"]) is int):
        start_idx = data["start"]
    if ("stop" in data) and (type(data["stop"]) is int):
        stop_idx = data["stop"]
    if ("msgpack" in data) and (type(data["msgpack"]) is bool):
        use_msgpack = data["msgpack"]

    # The recording was written as back-to-back msgpack objects with no
    # padding/association between them, so use the msgpack streaming API.
    try:
        unpacker = msgpack.Unpacker(file, raw=False)
        response_items = []
        for i, unpacked in enumerate(unpacker):
            if i >= start_idx:
                # Repack each entry as an (id, {key: value}) tuple
                repacked = (unpacked["id"], {})
                for k in unpacked:
                    if k != "id":
                        if use_msgpack:
                            repacked[1][k] = msgpack.unpackb(unpacked[k],
                                                             raw=False)
                        else:
                            repacked[1][k] = unpacked[k]
                response_items.append(repacked)
            if (stop_idx != -1) and (i >= stop_idx):
                break
        return response_items
    finally:
        # Bug fix: the file handle was previously never closed (leaked on
        # every call).
        file.close()
def plot_recording(data):
    '''
    Makes a plot of the recording. Takes a msgpack-serialized JSON object
    with the following fields
        name : required recording name
        plots: list of plots to make, where each item in the list is a
            list as well. Each item in the plots list is a tuple, with
            values:
                - 0 : lambda function to perform on the data. The data
                      will be passed to the lambda as a dictionary named `x`
                - 1 : list of keys on which to perform the lambda function
                - 2 : optional label
            An example plots field would look like:
                "plots": [
                    {
                        "data": [
                            ["x[0]", ["joint_0", "joint_1"], "label0"],
                        ],
                        "title": "Some Title",
                        "y_label": "Some Y Label",
                        "x_label": "Some X Label",
                        "legend": true/false,
                    },
                    {
                        "data": [
                            ["x[1]", ["joint_0", "joint_1"], "label1"],
                            ["x[2]", ["joint_0", "joint_1"], "label2"],
                        ],
                        ...
                    }
                ]
        start: Entry index to start the plot at
        stop: Entry index to stop the plot at
        msgpack: Whether or not to use msgpack to deserialize each key on
            readback from the recording. Default false
        save: Optional, if true will save an image of each plot, default false
        show: Optional, default true, will show the plots in an interactive
            fashion
        perm: Optional, default false. If true will save in the permanent
            file location, else temporary
        x: Optional lambda for converting an entry into a timestamp. If not
            passed, will use the redis timestamp. If passed, will be a
            lambda for an entry
                lambda entry: ...
            where the user supplies ... to convert the entry into an x-label
    '''
    # Load the recording. If we failed to load it just return that error
    result = _get_recording(data)
    if type(result) is not list:
        return result

    # Get the number of results; nothing to plot for an empty recording
    n_results = len(result)
    if (n_results == 0):
        return Response(err_code=4,
                        err_str="0 results for recording",
                        serialize=True)

    # We should have a list of all of the entries that we care about seeing
    # and now for each entry need to go ahead and run all of the lambdas
    if ("plots" not in data) or (type(data["plots"]) is not list):
        return Response(err_code=5,
                        err_str="Plots must be specified",
                        serialize=True)

    # Note the plots
    plots = data["plots"]

    # Build the x axis: either a user-supplied lambda applied to each
    # entry's data dict, or the millisecond part of the redis entry ID.
    # NOTE(review): eval of user-supplied strings — assumes trusted input.
    if ("x" in data):
        try:
            x_lambda = eval("lambda entry: " + data["x"])
            x_data = [x_lambda(entry[1]) for entry in result]
            x_label = str(data["x"])
        except:
            return Response(
                err_code=6,
                err_str="Unable to convert {} to x data lambda".format(
                    data["x"]))
    else:
        x_data = [int(entry[0].split('-')[0]) for entry in result]
        x_label = "Redis Timestamp (ms)"

    # Turn the x data into a numpy array and subtract off the first item
    # so that the scale is reasonable
    x_data = np.array(x_data)
    x_data -= x_data[0]

    # Convert the input data to lambdas, one figure per plot spec
    figures = []
    for plot_n, plot in enumerate(plots):
        # List of (lambda, keys, label) tuples to run for this plot
        lambdas = []
        total_lines = 0
        # Get the plot data
        if ("data" not in plot) or (type(plot["data"]) is not list):
            return Response(err_code=7,
                            err_str="Each plot must have a data list",
                            serialize=True)
        plot_data = plot["data"]
        # Make the lambda for each data spec
        for val in plot_data:
            # Make sure the length of the array is proper
            if ((len(val) < 2) or (len(val) > 3)):
                return Response(
                    err_code=8,
                    err_str="plot value {} does not have 2 or 3 items".format(
                        val),
                    serialize=True)
            # Try to make the lambda from the first item
            try:
                lamb = eval("lambda x: " + val[0])
            except:
                return Response(err_code=9,
                                err_str="Unable to make lambda from {}".format(
                                    val[0]),
                                serialize=True)
            # Make sure each key exists in the first data item
            for key in val[1]:
                if key not in result[0][1]:
                    return Response(err_code=10,
                                    err_str="Key {} not in data".format(key),
                                    serialize=True)
            # Add the number of keys in this lambda to the total number
            # of lines
            total_lines += len(val[1])
            # Get the label; default to the lambda source text
            if len(val) == 3:
                label = str(val[2])
            else:
                label = str(val[0])
            lambdas.append((lamb, val[1], label))

        # Now we want to preallocate the data for the plot. It should be a
        # matrix that's n-dimensional by lambda-key pair and entry
        to_plot = np.zeros((total_lines, n_results))

        # And finally we want to loop over all of the data, filling one
        # row per (lambda, key) pair
        for i, val in enumerate(result):
            idx = 0
            for (l, keys, label) in lambdas:
                for key in keys:
                    to_plot[idx][i] = l(val[1][key])
                    idx += 1

        # Now, we can go ahead and make the figure
        fig = plt.figure()
        figures.append(fig)

        # Plot all of the lines
        idx = 0
        for (l, keys, label) in lambdas:
            for key in keys:
                plt.plot(x_data, to_plot[idx, :], label=label + "-" + key)
                idx += 1

        # Make the title, x label, y label and legend
        title = "Recording-{}-{}".format(
            data["name"], plot.get("title", "Plot {}".format(plot_n)))
        plt.title(title)
        # Make the x label
        plt.xlabel(plot.get("x_label", x_label))
        # Make the y label
        plt.ylabel(plot.get("y_label", "Value"))
        # Make the legend
        if plot.get("legend", True):
            plt.legend()

        # If we are supposed to save the figures, do so
        if data.get("save", False):
            fig.savefig(
                os.path.join(
                    PERM_RECORDING_LOC
                    if data.get("perm", False) else TEMP_RECORDING_LOC,
                    title))

    # Draw the new plot(s) interactively if requested
    if data.get("show", True):
        plt.show()

    return Response("Success", serialize=True)
def csv_recording(data):
    ''' Converts a recording to CSV. Takes a msgpack'd object with the
    following parameters:
        name: required. Recording name
        perm: Optional, default false. Whether to save the files in the
            permanent or temporary location.
        lambdas: Optional. Multi-typed, either dictionary or string.
            If dictionary, dictionary of key : lambda values to convert
            keys into an iterable. Each key will get its own CSV sheet
            and each column will be an iterable from the value returned.
            Lambda will be lambda: val.
            If string, same as above but applied to all keys
        msgpack: Optional, default false. Will use msgpack to deserialize
            the key data before passing it to the lambda or attempting to
            iterate over it.
        x: Optional, default uses redis ID. Specify a lambda on the entry
            to generate the "x" column (column 0) of the CSV file
        desc: Optional. Description to add to filename s.t. it doesn't
            overwrite pre-existing data
    '''
    result = _get_recording(data)
    if type(result) is not list:
        return result

    # One CSV file per key in the recording; track them so every open
    # handle is closed on every exit path.
    files = {}
    desc = data.get("desc", "")
    try:
        for key in result[0][1]:
            filename = os.path.join(
                PERM_RECORDING_LOC
                if data.get("perm", False) else TEMP_RECORDING_LOC,
                "{}-{}-{}.csv".format(data["name"], desc, key))
            try:
                files[key] = open(filename, "w")
            except OSError:
                return Response(
                    err_code=4,
                    err_str="Failed to open file {}".format(filename),
                    serialize=True)

        # Optional lambda for the x (first) column
        # NOTE(review): eval of user-supplied strings — assumes trusted input
        x_lambda = data.get("x", None)
        if x_lambda is not None:
            try:
                x_lambda = eval("lambda entry: " + x_lambda)
            except Exception:
                return Response(
                    err_code=5,
                    err_str="Failed to convert {} to lambda".format(x_lambda),
                    serialize=True)

        # Optional per-key (dict) or global (string) value lambdas
        lambdas = data.get("lambdas", None)
        if lambdas is not None:
            if type(lambdas) is dict:
                for key in lambdas:
                    try:
                        lambdas[key] = eval("lambda x: " + lambdas[key])
                    except Exception:
                        return Response(
                            err_code=6,
                            err_str="Failed to convert {} to lambda".format(
                                lambdas[key]),
                            serialize=True)
            elif type(lambdas) is str:
                try:
                    l_val = eval("lambda x: " + lambdas)
                except Exception:
                    return Response(
                        err_code=6,
                        err_str="Failed to convert {} to lambda".format(
                            lambdas),
                        serialize=True)
                # Same lambda for every key
                lambdas = {key: l_val for key in result[0][1]}
            else:
                return Response(
                    err_code=7,
                    err_str="Lambdas argument must be dict or string",
                    serialize=True)

        # Loop over the data, writing one row per entry to each key's file
        for (redis_id, entry) in result:
            # Column 0: user lambda or the redis timestamp portion
            if x_lambda is not None:
                x_val = x_lambda(entry)
            else:
                x_val = redis_id.split('-')[0]
            for key in entry:
                # Value by default is just entry[key], optionally
                # transformed by the matching lambda
                val = entry[key]
                if lambdas is not None and key in lambdas:
                    val = lambdas[key](val)
                # Build the CSV line: x value, then one column per item
                # of the (possibly non-) iterable value
                buff = "{},".format(x_val)
                try:
                    for v in val:
                        buff += "{},".format(v)
                except TypeError:
                    # Not iterable: write the scalar as a single column
                    buff += "{}".format(val)
                buff += "\n"
                files[key].write(buff)
    finally:
        # Bug fix: the CSV file handles were previously never closed,
        # leaking descriptors (and risking unflushed data) on every call.
        for f in files.values():
            f.close()

    return Response("Success", serialize=True)
def start_recording(data):
    # Data should be a dictionary with the following keys
    #   name: required. String for the name of the recording
    #   t: Optional time (in seconds) to record for. Defaults to 10
    #   n: Optional number of entries to record for. If both t and n are
    #      specified, n takes precedence
    #   perm: Optional boolean to make the recording persistent/permanent.
    #         Will store the recording in a different location if so
    #   e: Required element name
    #   s: Required stream name
    global active_recordings

    # Validate the required string fields
    if ("name" not in data) or (type(data["name"]) is not str):
        return Response(err_code=1, err_str="name must be in data",
                        serialize=True)
    if ("e" not in data) or (type(data["e"]) is not str):
        return Response(err_code=2, err_str="element must be in data",
                        serialize=True)
    if ("s" not in data) or (type(data["s"]) is not str):
        return Response(err_code=3, err_str="stream must be in data",
                        serialize=True)

    name = data["name"]
    element = data["e"]
    stream = data["s"]

    # Refuse duplicate recording names
    if name in active_recordings:
        return Response(err_code=4,
                        err_str="Name {} already in use".format(name),
                        serialize=True)

    # Optional parameters from the API (type-checked, with defaults)
    n_entries = data["n"] if ("n" in data) and (type(data["n"]) is int) \
        else None
    n_sec = data["t"] if ("t" in data) and (type(data["t"]) is int) \
        else DEFAULT_N_SEC
    perm = data["perm"] if ("perm" in data) and (type(data["perm"]) is bool) \
        else False

    # Permanent recordings require the permanent location to be mounted
    if perm and not os.path.exists(PERM_RECORDING_LOC):
        return Response(
            err_code=5,
            err_str="Please mount {} in your docker-compose file".format(
                PERM_RECORDING_LOC),
            serialize=True)

    # Spawn the recording thread and register it before starting
    thread = Thread(
        target=record_fn,
        args=(name, n_entries, n_sec, perm, element, stream),
        daemon=True)
    active_recordings[name] = thread
    thread.start()

    duration = ("{} entries".format(n_entries) if n_entries is not None
                else "{} seconds".format(n_sec))
    location = PERM_RECORDING_LOC if perm else TEMP_RECORDING_LOC
    return Response(
        "Started recording {} for {} and storing in {}".format(
            name, duration, location),
        serialize=True)
def get_mode(self, _):
    """ Returns the current mode of the algorithm (both or depth). """
    # Commands always take a single argument, even when unused
    current_mode = self.mode
    return Response(current_mode)