def main():
    """Get input, parse it, and pass it on to process.

    With command-line arguments, parses them as one sentence; otherwise
    runs an interactive prompt until EOF/interrupt or the server drops.
    Relies on module-level MONGODB flag and semlog helper for logging.
    """
    client = PipelineClient(verbose=True)
    if len(sys.argv) > 1:
        # One-shot mode: treat all arguments as a single sentence.
        text = " ".join(sys.argv[1:])
        parse = client.parse(text)
        if parse:
            process(parse)
        else:
            # A falsy parse means the pipeline connection dropped.
            print "Connection to server closed."
    else:
        # Interactive mode: prompt until EOF/Ctrl+C.
        while True:
            try:
                text = raw_input('> ')
            except (KeyboardInterrupt, EOFError):
                break
            # NOTE(review): looks like leftover debug output -- confirm
            # whether this echo is still wanted.
            print "text: ", [text]
            semlog_command = False
            if MONGODB:
                # Check whether the input is a semlog command rather than
                # a sentence to parse.
                semlog_command = semlog.is_command(text)
            if semlog_command:
                print semlog_command
            else:
                parse = client.parse(text)
                if parse:
                    semantic_structures = process(parse)
                    # Log structures only when MongoDB logging is enabled.
                    if semantic_structures and MONGODB:
                        semlog.log_structures(text, semantic_structures)
                    if semantic_structures:
                        for s in semantic_structures:
                            print "semantic_structure dict: ", s.to_dict()
                else:
                    print "Connection to server closed."
                    break
def stress():
    """Stress the pipeline.

    Opens a fresh client each iteration, parses a sentence containing a
    random ten-letter word, and closes the client, looping until Ctrl+C.
    """
    try:
        while True:
            # Build a random ten-letter word for this round.
            letters = [random.choice(string.ascii_letters) for _ in range(10)]
            word = ''.join(letters)
            client = PipelineClient()
            client.parse(
                "This is the tremendously violent stress test of {}.".format(word))
            client.close()
    except KeyboardInterrupt:
        # Ctrl+C is the expected way to stop the stress loop.
        pass
def stress():
    """Stress the pipeline.

    Reuses one client to parse a fixed sentence every 0.1 seconds until
    interrupted, then closes the client.
    """
    client = PipelineClient()
    try:
        while True:
            client.parse("This is the tremendously violent stress test of doom.")
            sleep(.1)
    except KeyboardInterrupt:
        # Fall through to close the client on Ctrl+C.
        pass
    client.close()
class SemanticsHandler(object):
    """Wraps a PipelineClient and turns parses into semantic structures."""

    def __init__(self):
        # Verbose client so pipeline traffic is visible on the console.
        self.client = PipelineClient(verbose=True)

    def get_semantic_structures(self, sentence):
        """Parse a sentence and return its semantic structures (quietly)."""
        parse = self.client.parse(sentence)
        return self.process(parse, verbose=False)

    def process(self, parse, verbose=True):
        """Show the steps of transformation for a parse.

        Returns the semantic structures extracted from the parse, or None
        if no frames matched or no structures were produced.
        """
        # Original parse
        # NOTE(review): parse_tree is computed but never used below --
        # possibly kept for its side effect of validating the parse string;
        # confirm before removing.
        parse_tree = Tree.parse(parse)
        frames = extract_frames_from_parse(parse, verbose=verbose)
        if verbose:
            print
            for frame in frames:
                print frame.pprint()
                if frame.condition:
                    print "Condition:"
                    print frame.condition.pprint()
            print
        # Bail if no frames matched
        if not frames:
            if verbose:
                print "No frames matched."
            return
        # Extract semantic structures
        semantic_structures = create_semantic_structures(frames)
        if semantic_structures:
            if verbose:
                print semantic_structures
            return semantic_structures
        else:
            if verbose:
                print "No semantic structures returned."
def test_pair(testkey, testsent, trueframes, truecommands):
    """Parse testsent and check extracted frames/commands against expectations.

    Raises AssertionError when the extracted frames or commands differ
    from trueframes/truecommands.
    """
    tree = PipelineClient().parse(testsent)
    new_frames, new_commands, kb_response = extract_commands(tree, verbose=True)
    new_commands = [str(command) for command in new_commands]
    new_frames = [frame.pprint() for frame in new_frames]
    # BUG FIX: the original called self.assertEqual inside a free function
    # (no self in scope -> NameError at runtime). Plain asserts give the
    # same pass/fail behavior without requiring a TestCase instance.
    assert new_frames == trueframes, \
        "Frames for {!r} differ: {} != {}".format(testkey, new_frames, trueframes)
    assert new_commands == truecommands, \
        "Commands for {!r} differ: {} != {}".format(testkey, new_commands, truecommands)
def dataReceived(self, data):
    """Dispatch newline-separated protocol messages from the game client.

    Handles three message types: chat messages (parsed and answered),
    player movement updates, and environment creation.
    """
    lines = data.split('\n')
    print 'Received input: ', lines
    for s in lines:
        # NOTE(review): the prefixes here are string literals named like
        # constants ('CHAT_MESSAGE_PREFIX', ...); confirm they are the
        # actual wire-format strings and not meant to be module constants.
        if s.startswith('CHAT_MESSAGE_PREFIX'):
            s = remove_prefix(s, 'CHAT_MESSAGE_PREFIX<Commander> ')
            # TODO: multi-process lock
            parse = PipelineClient().parse(s)
            frames, new_commands, kb_response = process_parse_tree(
                parse, data, self.kb, quiet=True)
            # Reply in-channel as Junior, then re-plan toward the commander.
            self.sendMessage(
                'CHAT_MESSAGE_PREFIX',
                '<Junior> ' + make_response(new_commands, kb_response))
            self.ge.jr.plan_path(self.ge.cmdr.cell)
        elif s.startswith('MOVE_PLAYER_CELL'):
            s = remove_prefix(s, 'MOVE_PLAYER_CELL')
            # Payload is "new_x,old_x,new_y,old_y"; only the new cell is used.
            new_x, old_x, new_y, old_y = s.split(',')
            self.ge.update_cmdr((int(new_x), int(new_y)))
            print str(self.ge)
        elif s.startswith('CREATE_FPSENVIRONMENT'):
            s = remove_prefix(s, 'CREATE_FPSENVIRONMENT')
            # This will be provided before environment related messages
            self.ge = GameEnvironment(s)
            # Poll waypoint following every 50 ms.
            lc = LoopingCall(self.ge.jr.follow_waypoints, self.sendMessage)
            lc.start(0.05)
def test_pair(testkey, testsent):
    """Parse testsent and return a test-tuple source line for it.

    The returned string is a Python tuple literal of the form
    "('key' , 'sentence' , [frames] , [commands])," suitable for pasting
    into a test-case table.
    """
    tree = PipelineClient().parse(testsent)
    new_frames, new_commands, kb_response = extract_commands(tree, verbose=True)
    new_frames = [frame.pprint() for frame in new_frames]
    new_commands = [str(command) for command in new_commands]
    # BUG FIX: the original referenced the undefined name 'key' (NameError);
    # the parameter is 'testkey'.
    if new_frames:
        return ("('" + testkey + "' , '" + testsent + "' , " + str(new_frames) +
                " , " + str(new_commands) + " ),")
    else:
        return "('" + testkey + "' , '" + testsent + "' , " + "[] , []),"
class TestPipeline(unittest.TestCase):
    """Tests for the parsing pipeline."""

    def setUp(self):
        self.client = PipelineClient()

    def test_parse(self):
        """Test that a simple sentence can be parsed."""
        expected = ("(S (NP-SBJ-A (DT This)) (VP (VBZ is) "
                    "(NP-PRD-A (DT a) (NN test)))(. .))")
        actual = self.client.parse("This is a test.")
        self.assertEqual(actual, expected)
def test(): """Test by parsing some text.""" client = PipelineClient(verbose=True) # If command line arguments are supplied, run them through if len(sys.argv) > 1: msg = client.parse(" ".join(sys.argv[1:])) if not msg: print "Connection to server closed." else: print msg else: while True: try: text = raw_input('> ') except (KeyboardInterrupt, EOFError): break msg = client.parse(text) if not msg: print "Connection to server closed." break print msg
def main(): """Get input, parse it, and pass it on to process.""" client = PipelineClient(verbose=True) if len(sys.argv) > 1: text = " ".join(sys.argv[1:]) parse = client.parse(text) if parse: process(parse) else: print "Connection to server closed." else: while True: try: text = raw_input('> ') except (KeyboardInterrupt, EOFError): break parse = client.parse(text) if parse: process(parse) else: print "Connection to server closed." break
def stress():
    """Stress the pipeline.

    Creates a new client per iteration, parses a sentence built around a
    random ten-letter word, and closes the client; stops on Ctrl+C.
    """
    try:
        while True:
            client = PipelineClient()
            random_word = ''.join(
                random.choice(string.ascii_letters) for _ in range(10))
            sentence = "This is the tremendously violent stress test of {}.".format(
                random_word)
            client.parse(sentence)
            client.close()
    except KeyboardInterrupt:
        # Ctrl+C ends the stress run cleanly.
        pass
def process_input(text, kb, verbose=True): """Send given text to the semantics component""" msg = PipelineClient().parse(text) if msg: print msg frames, new_commands, kb_response = process_parse_tree(msg, text, kb, quiet=True) if verbose: print "Frames: %s" % '\n'.join(str(f) for f in frames) print "Knowledge base: %s" % str(kb) print 'Response: %s' % make_response(new_commands, kb_response) print '\n'.join(str(c) for c in new_commands) return True else: print 'Connection to server closed.' return False
def dataReceived(self, data):
    """Parse incoming text, run semantics on it, and reply with JSON.

    The response dict carries the raw parse, deduplicated frame trees,
    frame dicts, a natural-language response, and the command structures.
    """
    # Remove extra escape characters
    data = data.replace('\\', '')
    print 'Received input: ', data
    # A secret prefix switches on the knowledge-base demo; strip it so the
    # remaining text can be parsed normally.
    knowledge_demo = data.startswith(SECRET_CODE)
    if knowledge_demo:
        data = data[len(SECRET_CODE):]
    # Append every request to the audit log with timestamp and peer.
    with open(LOG_FILE, 'a') as f:
        f.write('%s | %s | "%s"\n' % (time.asctime(),
                                      str(self.transport.getPeer()), data))
    # Serialize access to the pipeline and response construction.
    with self.lock:
        parse = PipelineClient(verbose=True).parse(data)
        response = {}
        # The KB is only consulted in demo mode.
        frames, new_commands, kb_response = process_parse_tree(
            parse, data, self.kb if knowledge_demo else None, quiet=True)
        response['parse'] = parse
        if frames is not None:
            # We do a join and split to make sure all whitespace becomes single spaces
            modified_trees = [
                " ".join(str(modified_parse_tree[1]).split())
                for modified_parse_tree in frames
                if (len(modified_parse_tree) > 1 and
                    isinstance(modified_parse_tree[1], tree.Tree))]
            response['trees'] = list(set(modified_trees))
            response['frames'] = [
                frame_dict for frame_dict in
                [frame[0] for frame in frames if isinstance(frame[0], dict)]]
        else:
            # No frames at all: return empty collections rather than null.
            response['trees'] = []
            response['frames'] = []
        response['response'] = make_response(new_commands, kb_response)
        response['structures'] = '\n\n'.join(str(c) for c in new_commands)
        self.transport.write(json.dumps(response))
def interactive_mode(window, first_input):
    """Interactively get input from the user and parse it.

    Runs a curses UI loop: reads text, shows the parse in one window and
    the semantic analysis in another, until the user enters q/quit.
    first_input, when given, is processed as the first sentence.
    """
    input_frame, input_win, parse_win, semantic_win = setup_windows(window)
    # Initialize pipeline and knowledge base
    pipeline = PipelineClient()
    kb = KnowledgeBase()
    # Send some data through the pipeline
    result = pipeline.parse("This is a test.")
    input_frame.addstr(1, 1, 'Enter your input, then press Ctrl+G. '
                       'Enter "quit" or press Ctrl+C to exit.')
    input_frame.refresh()
    # Until the input is q/quit, process data
    last_input = first_input
    while True:
        # Display the first input if needed
        input_win.erase()
        input_win.refresh()
        if last_input:
            input_win.addstr(0, 0, last_input)
        # Get text from the input box, removing any embedded newlines
        if first_input:
            text = first_input
            first_input = None
        else:
            text = get_input(input_win).replace("\n", "").strip()
        last_input = text
        # Quit if needed
        if text == "q" or text == "quit":
            return
        # Get input again if it was empty
        if not text:
            continue
        # Echo input and display status, clearing both windows
        parse_win.clear()
        parse_win.addstr(text)
        parse_win.addstr('\nParsing and restoring null elements...')
        parse_win.refresh()
        semantic_win.clear()
        semantic_win.refresh()
        # Run the parse pipeline
        result = pipeline.parse(text)
        result_tree = Tree(result)
        # Output the longest parse that will fit. We try to draw the
        # possible output in order of decreasing length.
        parse_max_width = parse_win.getmaxyx()[1]
        possible_formats = (result_tree.pprint(margin=parse_max_width,
                                               force_multiline=True),
                            result_tree.pprint(margin=parse_max_width),
                            result)
        for formatted_result in possible_formats:
            parse_win.clear()
            try:
                parse_win.addstr(text + '\n')
                parse_win.addstr(formatted_result)
            except _curses.error:
                # Too wide/tall for the window; try the next, shorter format.
                continue
            else:
                # We've successfully printed, stop trying formats
                break
        else:
            # Every format overflowed the window.
            parse_win.clear()
            parse_win.addstr("Parse too large to show.\n")
        parse_win.refresh()
        # Do the same for semantics
        # Echo input and display status, after clearing the window
        semantic_win.clear()
        semantic_win.addstr(text)
        semantic_win.addstr('\nPerforming semantic analysis...')
        semantic_win.refresh()
        frames, new_commands, kb_response = process_parse_tree(result, text, kb)
        semantic_win.clear()
        try:
            if frames:
                semantic_win.addstr("Frames matched:\n")
                for frame in frames:
                    semantic_win.addstr("\t" + str(frame) + "\n")
            if new_commands:
                semantic_win.addstr("New commands:\n")
                for command in new_commands:
                    semantic_win.addstr(str(command) + "\n")
            if kb_response:
                semantic_win.addstr("KB response:\n")
                semantic_win.addstr(str(kb_response) + "\n")
            if not any((frames, new_commands, kb_response)):
                semantic_win.addstr("No frames matched.\n")
        except _curses.error:
            # Output overflowed the semantics window.
            semantic_win.clear()
            semantic_win.addstr("Semantic representation too large to show.")
        semantic_win.refresh()
    # NOTE(review): unreachable -- the loop above only exits via return.
    return
#!/usr/bin/env python """Run all nose tests.""" import sys import os import nose from pipelinehost import PipelineHost, PipelineClient # Run the pipelinehost in advance just in case. If there's already a # pipeline running, this will just fail without causing any problems. try: print "Checking whether PipelineHost is running..." client = PipelineClient() except IOError: print "Launching PipelineHost..." pipeline = PipelineHost(local=True) else: print "PipelineHost appears to already be running." # Put the root of SLURP on the path, just to be safe sys.path.append(os.path.dirname(os.path.abspath(__file__))) nose.main()
def generate(self, text, sensors, regions, props, tag_dict,
             realizable_reactions=False):
    """Generate a logical specification from natural language and propositions.

    Parses each line of text through the pipeline, applies metapars to the
    resulting commands, and returns a tuple of (env_lines, sys_lines,
    custom_props, custom_sensors, results, responses, generation_trees).
    """
    # Clean unicode out of everything
    text = text.encode('ascii', 'ignore')
    self.sensors = [astr.encode('ascii', 'ignore') for astr in sensors]
    self.regions = [astr.encode('ascii', 'ignore') for astr in regions]
    self.props = [astr.encode('ascii', 'ignore') for astr in props]
    self.tag_dict = {key.encode('ascii', 'ignore'):
                     [value.encode('ascii', 'ignore') for value in values]
                     for key, values in tag_dict.items()}
    print "NL->LTL Generation called on:"
    print "Sensors:", self.sensors
    print "Props:", self.props
    print "Regions:", self.regions
    print "Tag dict:", self.tag_dict
    print "Text:", repr(text)
    print
    # Make lists for POS conversions, including the metapar keywords
    force_nouns = list(self.regions) + list(self.sensors)
    force_verbs = list(self.props) + self.GOALS.keys()
    parse_client = PipelineClient()
    results = []
    responses = []
    custom_props = set()
    custom_sensors = set()
    self.generation_trees = OrderedDict()
    for line in text.split('\n'):
        # Strip the text before using it and ignore any comments
        line = line.strip()
        line = _remove_comments(line)
        if not line:
            # Blank lines are counted as being processed correctly but are skipped
            results.append(True)
            responses.append('')
            continue
        # Init the generation tree to the empty result
        generated_lines = defaultdict(list)
        self.generation_trees[line] = generated_lines
        print "Sending to remote parser:", repr(line)
        parse = parse_client.parse(line, force_nouns, force_verbs=force_verbs)
        print "Response from parser:", repr(parse)
        frames, new_commands, kb_response = process_parse_tree(parse, line,
                                                               self.kbase)
        if SEMANTICS_DEBUG:
            print "Returned values from semantics:"
            print "Semantics results:"
            for frame in frames:
                print "\t" + str(frame)
            print "New commands:", new_commands
        # Build the metapars
        # For now, assume success if there were commands or a kb_response
        success = bool(new_commands) or bool(kb_response)
        command_responses = [kb_response] if kb_response else []
        for command in new_commands:
            try:
                new_sys_lines, new_env_lines, new_custom_props, new_custom_sensors = \
                    self._apply_metapar(command)
            except KeyError as err:
                # Unknown action keyword: report and keep going with the rest.
                problem = \
                    "Could not understand {!r} due to error {}.".format(
                        command.action, err)
                print "ERROR: " + problem
                command_responses.append(str(err))
                success = False
                continue
            else:
                command_responses.append(respond_okay(command.action))
            # Add in the new lines
            generated_lines[_format_command(command)].extend(new_sys_lines)
            generated_lines[_format_command(command)].extend(new_env_lines)
            # Add custom props/sensors
            custom_props.update(new_custom_props)
            custom_sensors.update(new_custom_sensors)
        # If we've got no responses, say we didn't understand at all.
        if not command_responses:
            command_responses.append(respond_nocommand())
        # Add responses and successes
        results.append(success)
        responses.append(' '.join(command_responses))
        print
    # We need to modify non-reaction goals to be or'd with the reactions
    if realizable_reactions and self.react_props:
        # Dedupe and make an or over all the reaction properties
        reaction_or_frag = or_([sys_(prop) for prop in self.react_props])
        # HACK: Rewrite all the goals!
        # TODO: Test again once we re-enable reaction propositions
        # NOTE(review): generated_lines values are plain lists (defaultdict(list))
        # but this loop accesses spec_lines.lines -- verify these values are
        # actually SpecLines-like objects elsewhere, or this branch would fail.
        for command_spec_lines in self.generation_trees.values():
            for spec_lines in command_spec_lines.values():
                spec_lines.lines = [_insert_or_before_goal(reaction_or_frag, line)
                                    for line in spec_lines.lines]
    # Aggregate all the propositions
    # Identify goal numbers as we loop over sys lines
    sys_lines = []
    # The zeroth goal is always []<>(TRUE), so we skip it.
    goal_idx = 1
    for input_text, command_spec_lines in self.generation_trees.items():
        for command, spec_lines_list in command_spec_lines.items():
            for spec_lines in spec_lines_list:
                if not spec_lines.issys():
                    continue
                for line in spec_lines.lines:
                    spec_lines.input = input_text
                    sys_lines.append(line)
                    if isgoal(line):
                        spec_lines.goal_indices.add(goal_idx)
                        goal_idx += 1
    # Filter out any duplicates from the env_lines
    env_lines = OrderedDict()
    for command_spec_lines in self.generation_trees.values():
        for spec_lines_list in command_spec_lines.values():
            for spec_lines in spec_lines_list:
                if not spec_lines.isenv():
                    continue
                for line in spec_lines.lines:
                    env_lines[line] = None
    env_lines = env_lines.keys()
    # Convert sets to lists for the caller
    custom_props = list(custom_props)
    custom_sensors = list(custom_sensors)
    print "Spec generation complete."
    print "Results:", results
    print "Responses:", responses
    print "Environment lines:", env_lines
    print "System lines:", sys_lines
    print "Custom props:", custom_props
    print "Custom sensors:", custom_sensors
    print "Generation tree:", self.generation_trees
    return (env_lines, sys_lines, custom_props, custom_sensors, results,
            responses, self.generation_trees)
def __init__(self):
    """Create the verbose pipeline client this handler parses with."""
    self.client = PipelineClient(verbose=True)
def setUp(self):
    """Create a fresh pipeline client before each test."""
    self.client = PipelineClient()
def generate(self, text, sensors, regions, props, tag_dict,
             realizable_reactions=True, verbose=True):
    """Generate a logical specification from natural language and propositions.

    Parses each line of text, applies metapars to the resulting commands,
    and returns a tuple of (env_lines, sys_lines, custom_props,
    custom_sensors, results, responses, generation_trees).
    """
    # Clean unicode out of everything
    text = text.encode('ascii', 'ignore')
    self.sensors = [astr.encode('ascii', 'ignore') for astr in sensors]
    self.regions = [astr.encode('ascii', 'ignore') for astr in regions]
    self.props = [astr.encode('ascii', 'ignore') for astr in props]
    self.tag_dict = {key.encode('ascii', 'ignore'):
                     [value.encode('ascii', 'ignore') for value in values]
                     for key, values in tag_dict.items()}
    if verbose:
        print "NL->LTL Generation called on:"
        print "Sensors:", self.sensors
        print "Props:", self.props
        print "Regions:", self.regions
        print "Tag dict:", self.tag_dict
        print "Text:", repr(text)
        print
    # Make lists for POS conversions, including the metapar keywords
    force_nouns = list(self.regions) + list(self.sensors)
    force_verbs = self.GOALS.keys()
    # Set up
    parse_client = PipelineClient()
    results = []
    responses = []
    custom_props = set()
    self.react_props = set()  # TODO: Make this a local
    custom_sensors = set()
    generation_trees = OrderedDict()
    # Add the actuator mutex
    if len(self.props) > 1:
        actuator_mutex = mutex_([sys_(prop) for prop in self.props], True)
        generation_trees["Safety assumptions"] = \
            {"Safety assumptions":
             [SpecChunk("Robot can perform only one action at a time.",
                        [actuator_mutex, always(actuator_mutex)],
                        SpecChunk.SYS, None)]}
    for line in text.split('\n'):
        # Strip the text before using it and ignore any comments
        line = line.strip()
        line = _remove_comments(line)
        if not line:
            # Blank lines are counted as being processed correctly but are skipped
            results.append(True)
            responses.append('')
            continue
        # Init the generation tree to the empty result
        generated_lines = OrderedDict()
        generation_trees[line] = generated_lines
        if verbose:
            print "Sending to remote parser:", repr(line)
        parse = parse_client.parse(line, force_nouns, force_verbs=force_verbs)
        if verbose:
            print "Response from parser:", repr(parse)
        # NOTE(review): process_parse_tree is called twice in a row and the
        # first (quiet=True) result is immediately overwritten -- this looks
        # like a merge artifact; confirm and delete one of the calls.
        frames, new_commands, kb_response = \
            process_parse_tree(parse, line, self.kbase, quiet=True)
        frames, new_commands, kb_response = process_parse_tree(
            parse, line, self.kbase, quiet=not verbose)
    # Build the metapars
        # For now, assume success if there were commands or a kb_response
        success = bool(new_commands) or bool(kb_response)
        command_responses = [kb_response] if kb_response else []
        for command in new_commands:
            if COMMAND_DEBUG:
                print "Processing command:"
                print command
            try:
                new_sys_lines, new_env_lines, new_custom_props, new_custom_sensors = \
                    self._apply_metapar(command)
            except KeyError as err:
                # Unknown action keyword: report and continue with the rest.
                cause = err.message
                problem = \
                    "Could not understand {!r} due to error {}.".format(
                        command.action, cause)
                if verbose:
                    print >> sys.stderr, "Error: " + problem
                command_responses.append(cause)
                success = False
                continue
            else:
                command_responses.append(respond_okay(command.action))
            # Add in the new lines
            command_key = _format_command(command)
            if command_key not in generated_lines:
                generated_lines[command_key] = []
            generated_lines[command_key].extend(new_sys_lines)
            generated_lines[command_key].extend(new_env_lines)
            # Add custom props/sensors
            custom_props.update(new_custom_props)
            custom_sensors.update(new_custom_sensors)
        # If we've got no responses, say we didn't understand at all.
        if not command_responses:
            command_responses.append(respond_nocommand())
        # Add responses and successes
        results.append(success)
        responses.append(' '.join(command_responses))
        # Add some space between commands
        if verbose:
            print
    if COMMAND_DEBUG:
        print "Generation trees:"
        for line, output in generation_trees.items():
            print line
            print output
        print
    # We need to modify non-reaction goals to be or'd with the reactions
    if realizable_reactions and self.react_props:
        # Dedupe and make an or over all the reaction properties
        reaction_or_frag = or_([sys_(prop) for prop in self.react_props])
        # HACK: Rewrite all the goals!
        # TODO: Test again with reaction propositions other than defuse
        for command_spec_chunks in generation_trees.values():
            for spec_chunks in command_spec_chunks.values():
                for spec_chunk in spec_chunks:
                    if not spec_chunk.issys():
                        continue
                    spec_chunk.lines = [_insert_or_before_goal(reaction_or_frag,
                                                               line)
                                        for line in spec_chunk.lines]
    # Aggregate all the propositions
    # Identify goal numbers as we loop over sys lines
    sys_lines = []
    # At the moment, there are no useless goals in specs, so we
    # begin at 0
    goal_idx = 0
    for input_text, command_spec_lines in generation_trees.items():
        for command, spec_lines_list in command_spec_lines.items():
            for spec_lines in spec_lines_list:
                if not spec_lines.issys():
                    continue
                for line in spec_lines.lines:
                    spec_lines.input = input_text
                    sys_lines.append(line)
                    if isgoal(line):
                        spec_lines.goal_indices.add(goal_idx)
                        goal_idx += 1
    # Filter out any duplicates from the env_lines
    env_lines = OrderedDict()
    for command_spec_lines in generation_trees.values():
        for spec_lines_list in command_spec_lines.values():
            for spec_lines in spec_lines_list:
                if not spec_lines.isenv():
                    continue
                for line in spec_lines.lines:
                    env_lines[line] = None
    env_lines = env_lines.keys()
    # Convert sets to lists for the caller
    custom_props = list(custom_props)
    custom_sensors = list(custom_sensors)
    if verbose:
        print "Spec generation complete."
        print "Results:", results
        print "Responses:", responses
        print "Environment lines:", env_lines
        print "System lines:", sys_lines
        print "Custom props:", custom_props
        print "Custom sensors:", custom_sensors
        print "Generation trees:", generation_trees
    return (env_lines, sys_lines, custom_props, custom_sensors, results,
            responses, generation_trees)
def generate(self, text, sensors, regions, props, tag_dict,
             realizable_reactions=True, verbose=True):
    """Generate a logical specification from natural language and propositions.

    Parses each line of text, applies metapars to the resulting commands,
    and returns a tuple of (env_lines, sys_lines, custom_props,
    custom_sensors, results, responses, generation_trees).
    """
    # Clean unicode out of everything
    text = text.encode("ascii", "ignore")
    self.sensors = [astr.encode("ascii", "ignore") for astr in sensors]
    self.regions = [astr.encode("ascii", "ignore") for astr in regions]
    self.props = [astr.encode("ascii", "ignore") for astr in props]
    self.tag_dict = {
        key.encode("ascii", "ignore"):
            [value.encode("ascii", "ignore") for value in values]
        for key, values in tag_dict.items()
    }
    if verbose:
        print "NL->LTL Generation called on:"
        print "Sensors:", self.sensors
        print "Props:", self.props
        print "Regions:", self.regions
        print "Tag dict:", self.tag_dict
        print "Text:", repr(text)
        print
    # Make lists for POS conversions, including the metapar keywords
    force_nouns = list(self.regions) + list(self.sensors)
    force_verbs = self.GOALS.keys()
    # Set up
    parse_client = PipelineClient()
    results = []
    responses = []
    custom_props = set()
    self.react_props = set()  # TODO: Make this a local
    custom_sensors = set()
    generation_trees = OrderedDict()
    # Add the actuator mutex
    if len(self.props) > 1:
        actuator_mutex = mutex_([sys_(prop) for prop in self.props], True)
        generation_trees["Safety assumptions"] = {
            "Safety assumptions": [
                SpecChunk(
                    "Robot can perform only one action at a time.",
                    [actuator_mutex, always(actuator_mutex)],
                    SpecChunk.SYS,
                    None,
                )
            ]
        }
    for line in text.split("\n"):
        # Strip the text before using it and ignore any comments
        line = line.strip()
        line = _remove_comments(line)
        if not line:
            # Blank lines are counted as being processed correctly but are skipped
            results.append(True)
            responses.append("")
            continue
        # Init the generation tree to the empty result
        generated_lines = OrderedDict()
        generation_trees[line] = generated_lines
        if verbose:
            print "Sending to remote parser:", repr(line)
        parse = parse_client.parse(line, force_nouns, force_verbs=force_verbs)
        if verbose:
            print "Response from parser:", repr(parse)
        # NOTE(review): process_parse_tree is called twice in a row and the
        # first (quiet=True) result is immediately overwritten -- this looks
        # like a merge artifact; confirm and delete one of the calls.
        frames, new_commands, kb_response = process_parse_tree(
            parse, line, self.kbase, quiet=True)
        frames, new_commands, kb_response = process_parse_tree(
            parse, line, self.kbase, quiet=not verbose)
        # Build the metapars
        # For now, assume success if there were commands or a kb_response
        success = bool(new_commands) or bool(kb_response)
        command_responses = [kb_response] if kb_response else []
        for command in new_commands:
            if COMMAND_DEBUG:
                print "Processing command:"
                print command
            try:
                new_sys_lines, new_env_lines, new_custom_props, new_custom_sensors = \
                    self._apply_metapar(command)
            except KeyError as err:
                # Unknown action keyword: report and continue with the rest.
                cause = err.message
                problem = "Could not understand {!r} due to error {}.".format(
                    command.action, cause)
                if verbose:
                    print >> sys.stderr, "Error: " + problem
                command_responses.append(cause)
                success = False
                continue
            else:
                command_responses.append(respond_okay(command.action))
            # Add in the new lines
            command_key = _format_command(command)
            if command_key not in generated_lines:
                generated_lines[command_key] = []
            generated_lines[command_key].extend(new_sys_lines)
            generated_lines[command_key].extend(new_env_lines)
            # Add custom props/sensors
            custom_props.update(new_custom_props)
            custom_sensors.update(new_custom_sensors)
        # If we've got no responses, say we didn't understand at all.
        if not command_responses:
            command_responses.append(respond_nocommand())
        # Add responses and successes
        results.append(success)
        responses.append(" ".join(command_responses))
        # Add some space between commands
        if verbose:
            print
    if COMMAND_DEBUG:
        print "Generation trees:"
        for line, output in generation_trees.items():
            print line
            print output
        print
    # We need to modify non-reaction goals to be or'd with the reactions
    if realizable_reactions and self.react_props:
        # Dedupe and make an or over all the reaction properties
        reaction_or_frag = or_([sys_(prop) for prop in self.react_props])
        # HACK: Rewrite all the goals!
        # TODO: Test again with reaction propositions other than defuse
        for command_spec_chunks in generation_trees.values():
            for spec_chunks in command_spec_chunks.values():
                for spec_chunk in spec_chunks:
                    if not spec_chunk.issys():
                        continue
                    spec_chunk.lines = [
                        _insert_or_before_goal(reaction_or_frag, line)
                        for line in spec_chunk.lines]
    # Aggregate all the propositions
    # Identify goal numbers as we loop over sys lines
    sys_lines = []
    # At the moment, there are no useless goals in specs, so we
    # begin at 0
    goal_idx = 0
    for input_text, command_spec_lines in generation_trees.items():
        for command, spec_lines_list in command_spec_lines.items():
            for spec_lines in spec_lines_list:
                if not spec_lines.issys():
                    continue
                for line in spec_lines.lines:
                    spec_lines.input = input_text
                    sys_lines.append(line)
                    if isgoal(line):
                        spec_lines.goal_indices.add(goal_idx)
                        goal_idx += 1
    # Filter out any duplicates from the env_lines
    env_lines = OrderedDict()
    for command_spec_lines in generation_trees.values():
        for spec_lines_list in command_spec_lines.values():
            for spec_lines in spec_lines_list:
                if not spec_lines.isenv():
                    continue
                for line in spec_lines.lines:
                    env_lines[line] = None
    env_lines = env_lines.keys()
    # Convert sets to lists for the caller
    custom_props = list(custom_props)
    custom_sensors = list(custom_sensors)
    if verbose:
        print "Spec generation complete."
        print "Results:", results
        print "Responses:", responses
        print "Environment lines:", env_lines
        print "System lines:", sys_lines
        print "Custom props:", custom_props
        print "Custom sensors:", custom_sensors
        print "Generation trees:", generation_trees
    return (env_lines, sys_lines, custom_props, custom_sensors, results,
            responses, generation_trees)