コード例 #1
0
ファイル: new_parse_server.py プロジェクト: PennNLP/SLURP
    def dataReceived(self, data):
        # Remove extra escape characters
        data = data.replace('\\','')
        print 'Received input: ', data

        knowledge_demo = data.startswith(SECRET_CODE)
        if knowledge_demo:
            data = data[len(SECRET_CODE):]

        with open(LOG_FILE, 'a') as f:
            f.write('%s | %s | "%s"\n' % (time.asctime(), str(self.transport.getPeer()), data))

        with self.lock:
            parse = PipelineClient(verbose=True).parse(data)

        response = {}
        frames, new_commands, kb_response = process_parse_tree(parse, data, self.kb if knowledge_demo else None, quiet=True)
        response['parse'] = parse
        if frames is not None:
            # We do a join and split to make sure all whitespace becomes single spaces
            modified_trees = [" ".join(str(modified_parse_tree[1]).split())
                              for modified_parse_tree in frames
                              if (len(modified_parse_tree) > 1 and
                                  isinstance(modified_parse_tree[1], tree.Tree))]
            response['trees'] = list(set(modified_trees))
            response['frames'] = [frame_dict for frame_dict in [frame[0] for frame in frames
                                                    if isinstance(frame[0], dict)]]
        else:
            response['trees'] = []
            response['frames'] = []
        response['response'] = make_response(new_commands, kb_response)
        response['structures'] = '\n\n'.join(str(c) for c in new_commands)
        self.transport.write(json.dumps(response))
コード例 #2
0
ファイル: pragbot_client.py プロジェクト: Shar-pei-bear/SLURP
    def dataReceived(self, data):
        lines = data.split('\n')
        print 'Received input: ', lines

        for s in lines:
            if s.startswith('CHAT_MESSAGE_PREFIX'):
                s = remove_prefix(s, 'CHAT_MESSAGE_PREFIX<Commander> ')
                # TODO: multi-process lock
                parse = PipelineClient().parse(s)
                frames, new_commands, kb_response = process_parse_tree(
                    parse, data, self.kb, quiet=True)
                self.sendMessage(
                    'CHAT_MESSAGE_PREFIX',
                    '<Junior> ' + make_response(new_commands, kb_response))
                self.ge.jr.plan_path(self.ge.cmdr.cell)
            elif s.startswith('MOVE_PLAYER_CELL'):
                s = remove_prefix(s, 'MOVE_PLAYER_CELL')
                new_x, old_x, new_y, old_y = s.split(',')
                self.ge.update_cmdr((int(new_x), int(new_y)))
                print str(self.ge)
            elif s.startswith('CREATE_FPSENVIRONMENT'):
                s = remove_prefix(s, 'CREATE_FPSENVIRONMENT')
                # This will be provided before environment related messages
                self.ge = GameEnvironment(s)
                lc = LoopingCall(self.ge.jr.follow_waypoints, self.sendMessage)
                lc.start(0.05)
コード例 #3
0
ファイル: debug_semantics.py プロジェクト: uml-robotics/SLURP
def process_input(text, kb, verbose=True):
    """Send given text to the semantics component"""
    msg = PipelineClient().parse(text)
    if msg:
        print msg
        frames, new_commands, kb_response = process_parse_tree(msg, text, kb, quiet=True)
        if verbose:
            print frames
            print str(kb)
        if kb_response:
            print kb_response
        print '\n'.join(str(c) for c in new_commands)
        return True
    else:
        print 'Connection to server closed.'
        return False
コード例 #4
0
ファイル: debug_semantics.py プロジェクト: PennNLP/SLURP
def process_input(text, kb, verbose=True):
    """Send given text to the semantics component"""
    msg = PipelineClient().parse(text)
    if msg:
        print msg
        frames, new_commands, kb_response = process_parse_tree(msg, text, kb, quiet=True)
        if verbose:
            print "Frames: %s" % '\n'.join(str(f) for f in frames);
            print "Knowledge base: %s" % str(kb)
        
        print 'Response: %s' % make_response(new_commands, kb_response)
        print '\n'.join(str(c) for c in new_commands)
        return True
    else:
        print 'Connection to server closed.'
        return False
コード例 #5
0
ファイル: semanticsservice.py プロジェクト: PennNLP/SLURP
def process_text(request):
    """Build and return the JSON semantics response for a parse request."""
    request_dict = json.loads(request.in_)
    rospy.loginfo("NLP semantics request: %r" % request_dict)

    # Semantics expects plain ASCII byte strings.
    tree_text = request_dict["tree"].encode("ascii", "replace")
    input_text = request_dict["text"].encode("ascii", "replace")
    frames, new_commands, kb_response = process_parse_tree(tree_text, input_text)

    user_response = ("I understood: " + kb_response) if kb_response else "Got it."

    # TODO: Properly serialize the commands, possibly into a new message format
    command_dicts = [{"Command": str(command)} for command in new_commands]

    response_json = json.dumps(dict(zip(SEMANTICS_KEYS, [user_response, command_dicts])))
    rospy.loginfo("NLP semantics response: %r" % response_json)
    return response_json
コード例 #6
0
def process_text(request):
    """Process a parse request and return the JSON-encoded response."""
    request_dict = json.loads(request.in_)
    rospy.loginfo("NLP semantics request: %r" % request_dict)
    frames, new_commands, kb_response = process_parse_tree(
        request_dict['tree'].encode('ascii', 'replace'),
        request_dict['text'].encode('ascii', 'replace'))

    # Echo the knowledge-base response when there is one.
    if kb_response:
        user_response = "I understood: " + kb_response
    else:
        user_response = "Got it."

    # TODO: Properly serialize the commands, possibly into a new message format
    command_dicts = []
    for command in new_commands:
        command_dicts.append({'Command': str(command)})

    response_dict = dict(zip(SEMANTICS_KEYS, [user_response, command_dicts]))
    response_json = json.dumps(response_dict)
    rospy.loginfo("NLP semantics response: %r" % response_json)
    return response_json
コード例 #7
0
def process_input(text, kb, verbose=True):
    """Send given text to the semantics component"""
    msg = PipelineClient().parse(text)
    if msg:
        print msg
        frames, new_commands, kb_response = process_parse_tree(msg,
                                                               text,
                                                               kb,
                                                               quiet=True)
        if verbose:
            print "Frames: %s" % '\n'.join(str(f) for f in frames)
            print "Knowledge base: %s" % str(kb)

        print 'Response: %s' % make_response(new_commands, kb_response)
        print '\n'.join(str(c) for c in new_commands)
        return True
    else:
        print 'Connection to server closed.'
        return False
コード例 #8
0
    def dataReceived(self, data):
        """Handle one chunk of client input: parse it and reply with JSON.

        Side effects: appends a line to LOG_FILE, holds self.lock during
        the parse, and writes the JSON response to the transport.
        """
        # Remove extra escape characters
        data = data.replace('\\', '')
        print 'Received input: ', data

        # A SECRET_CODE prefix unlocks the knowledge base for this request.
        knowledge_demo = data.startswith(SECRET_CODE)
        if knowledge_demo:
            data = data[len(SECRET_CODE):]

        # Log timestamp, peer address, and the (prefix-stripped) input.
        with open(LOG_FILE, 'a') as f:
            f.write('%s | %s | "%s"\n' %
                    (time.asctime(), str(self.transport.getPeer()), data))

        # Serialize access to the parser pipeline.
        with self.lock:
            parse = PipelineClient(verbose=True).parse(data)

        response = {}
        frames, new_commands, kb_response = process_parse_tree(
            parse, data, self.kb if knowledge_demo else None, quiet=True)
        response['parse'] = parse
        if frames is not None:
            # We do a join and split to make sure all whitespace becomes single spaces
            modified_trees = [
                " ".join(str(modified_parse_tree[1]).split())
                for modified_parse_tree in frames
                if (len(modified_parse_tree) > 1
                    and isinstance(modified_parse_tree[1], tree.Tree))
            ]
            # Dedupe the rendered trees; ordering is not preserved.
            response['trees'] = list(set(modified_trees))
            response['frames'] = [
                frame_dict for frame_dict in
                [frame[0] for frame in frames if isinstance(frame[0], dict)]
            ]
        else:
            # No frames matched: reply with empty results rather than failing.
            response['trees'] = []
            response['frames'] = []
        response['response'] = make_response(new_commands, kb_response)
        response['structures'] = '\n\n'.join(str(c) for c in new_commands)
        self.transport.write(json.dumps(response))
コード例 #9
0
ファイル: pragbot_client.py プロジェクト: PennNLP/SLURP
    def dataReceived(self, data):
        """Dispatch newline-delimited messages from the pragbot game client."""
        lines = data.split('\n')
        print 'Received input: ', lines

        for s in lines:
            if s.startswith('CHAT_MESSAGE_PREFIX'):
                # Strip the chat prefix and commander tag to get the utterance.
                s = remove_prefix(s, 'CHAT_MESSAGE_PREFIX<Commander> ')
                # TODO: multi-process lock
                parse = PipelineClient().parse(s)
                # NOTE(review): the raw buffer `data` is passed as the sentence
                # text even though the parse came from `s` — confirm intentional.
                frames, new_commands, kb_response = process_parse_tree(parse, data, self.kb, quiet=True)
                self.sendMessage('CHAT_MESSAGE_PREFIX', '<Junior> ' + make_response(new_commands, kb_response))
                # Re-plan Junior's path toward the commander after each command.
                self.ge.jr.plan_path(self.ge.cmdr.cell)
            elif s.startswith('MOVE_PLAYER_CELL'):
                # Payload is "new_x,old_x,new_y,old_y"; only new coords are used.
                s = remove_prefix(s, 'MOVE_PLAYER_CELL')
                new_x, old_x, new_y, old_y = s.split(',')
                self.ge.update_cmdr((int(new_x), int(new_y)))
                print str(self.ge)
            elif s.startswith('CREATE_FPSENVIRONMENT'):
                s = remove_prefix(s, 'CREATE_FPSENVIRONMENT')
                # This will be provided before environment related messages
                self.ge = GameEnvironment(s)
                # Poll Junior's waypoint-following every 0.05 s (20 Hz).
                lc = LoopingCall(self.ge.jr.follow_waypoints, self.sendMessage)
                lc.start(0.05)
コード例 #10
0
ファイル: parser_demo.py プロジェクト: Shar-pei-bear/SLURP
def interactive_mode(window, first_input):
    """Interactively get input from the user and parse it.

    window is the top-level curses window; first_input, if non-empty,
    is processed as the first utterance before prompting interactively.
    Returns when the user enters "q"/"quit".
    """
    input_frame, input_win, parse_win, semantic_win = setup_windows(window)

    # Initialize pipeline and knowledge base
    pipeline = PipelineClient()
    kb = KnowledgeBase()

    # Send some data through the pipeline
    # NOTE(review): this warm-up parse's result is unused; presumably it
    # primes the pipeline connection — confirm.
    result = pipeline.parse("This is a test.")
    input_frame.addstr(1, 1, 'Enter your input, then press Ctrl+G. '
                       'Enter "quit" or press Ctrl+C to exit.')
    input_frame.refresh()

    # Until the input is q/quit, process data
    last_input = first_input
    while True:
        # Display the first input if needed
        input_win.erase()
        input_win.refresh()
        if last_input:
            input_win.addstr(0, 0, last_input)

        # Get text from the input box, removing any embedded newlines
        if first_input:
            text = first_input
            first_input = None
        else:
            text = get_input(input_win).replace("\n", "").strip()
        last_input = text

        # Quit if needed
        if text == "q" or text == "quit":
            return

        # Get input again if it was empty
        if not text:
            continue

        # Echo input and display status, clearing both windows
        parse_win.clear()
        parse_win.addstr(text)
        parse_win.addstr('\nParsing and restoring null elements...')
        parse_win.refresh()
        semantic_win.clear()
        semantic_win.refresh()

        # Run the parse pipeline
        result = pipeline.parse(text)
        result_tree = Tree(result)

        # Output the longest parse that will fit. We try to draw the
        # possible output in order of decreasing length.
        parse_max_width = parse_win.getmaxyx()[1]
        possible_formats = (result_tree.pprint(margin=parse_max_width, force_multiline=True),
                            result_tree.pprint(margin=parse_max_width),
                            result)

        for formatted_result in possible_formats:
            parse_win.clear()
            try:
                parse_win.addstr(text + '\n')
                parse_win.addstr(formatted_result)
            except _curses.error:
                # Too large for the window; fall through to the next format.
                continue
            else:
                # We've successfully printed, stop trying formats
                break
        else:
            # Every candidate format overflowed the window.
            parse_win.clear()
            parse_win.addstr("Parse too large to show.\n")
        parse_win.refresh()

        # Do the same for semantics
        # Echo input and display status, after clearing the window
        semantic_win.clear()
        semantic_win.addstr(text)
        semantic_win.addstr('\nPerforming semantic analysis...')
        semantic_win.refresh()

        frames, new_commands, kb_response = process_parse_tree(result, text, kb)
        semantic_win.clear()
        try:
            if frames:
                semantic_win.addstr("Frames matched:\n")
                for frame in frames:
                    semantic_win.addstr("\t" + str(frame) + "\n")
            if new_commands:
                semantic_win.addstr("New commands:\n")
                for command in new_commands:
                    semantic_win.addstr(str(command) + "\n")
            if kb_response:
                semantic_win.addstr("KB response:\n")
                semantic_win.addstr(str(kb_response) + "\n")
            if not any((frames, new_commands, kb_response)):
                semantic_win.addstr("No frames matched.\n")
        except _curses.error:
            # Output overflowed the semantics window; show a placeholder.
            semantic_win.clear()
            semantic_win.addstr("Semantic representation too large to show.")
        semantic_win.refresh()

    # Unreachable: the loop above only exits via the `return` on quit.
    return
コード例 #11
0
ファイル: specgeneration.py プロジェクト: uml-robotics/SLURP
    def generate(self, text, sensors, regions, props, tag_dict, realizable_reactions=False):
        """Generate a logical specification from natural language and propositions.

        text is newline-separated natural language; sensors, regions, and
        props are proposition name lists; tag_dict maps tags to value lists
        (all are stripped to ASCII). When realizable_reactions is True,
        reaction propositions are or'd into every system goal.

        Returns (env_lines, sys_lines, custom_props, custom_sensors,
        results, responses, generation_trees).
        """
        # Clean unicode out of everything
        text = text.encode('ascii', 'ignore')
        self.sensors = [astr.encode('ascii', 'ignore') for astr in sensors]
        self.regions = [astr.encode('ascii', 'ignore') for astr in regions]
        self.props = [astr.encode('ascii', 'ignore') for astr in props]
        self.tag_dict = {key.encode('ascii', 'ignore'):
                             [value.encode('ascii', 'ignore') for value in values]
                         for key, values in tag_dict.items()}

        print "NL->LTL Generation called on:"
        print "Sensors:", self.sensors
        print "Props:", self.props
        print "Regions:", self.regions
        print "Tag dict:", self.tag_dict
        print "Text:", repr(text)
        print

        # Make lists for POS conversions, including the metapar keywords
        force_nouns = list(self.regions) + list(self.sensors)
        force_verbs = list(self.props) + self.GOALS.keys()

        parse_client = PipelineClient()
        # results[i]/responses[i] correspond to the i-th input line.
        results = []
        responses = []
        custom_props = set()
        custom_sensors = set()
        self.generation_trees = OrderedDict()
        for line in text.split('\n'):
            # Strip the text before using it and ignore any comments
            line = line.strip()
            line = _remove_comments(line)

            if not line:
                # Blank lines are counted as being processed correctly but are skipped
                results.append(True)
                responses.append('')
                continue

            # Init the generation tree to the empty result
            generated_lines = defaultdict(list)
            self.generation_trees[line] = generated_lines

            print "Sending to remote parser:", repr(line)
            parse = parse_client.parse(line, force_nouns, force_verbs=force_verbs)
            print "Response from parser:", repr(parse)
            frames, new_commands, kb_response = process_parse_tree(parse, line, self.kbase)

            if SEMANTICS_DEBUG:
                print "Returned values from semantics:"
                print "Semantics results:"
                for frame in frames:
                    print "\t" + str(frame)
                print "New commands:", new_commands

            # Build the metapars
            # For now, assume success if there were commands or a kb_response
            success = bool(new_commands) or bool(kb_response)
            command_responses = [kb_response] if kb_response else []
            for command in new_commands:
                try:
                    new_sys_lines, new_env_lines, new_custom_props, new_custom_sensors = \
                        self._apply_metapar(command)
                except KeyError as err:
                    # Unknown action: report it, mark this line failed, move on.
                    problem = \
                        "Could not understand {!r} due to error {}.".format(command.action, err)
                    print "ERROR: " + problem
                    command_responses.append(str(err))
                    success = False
                    continue
                else:
                    command_responses.append(respond_okay(command.action))

                # Add in the new lines
                generated_lines[_format_command(command)].extend(new_sys_lines)
                generated_lines[_format_command(command)].extend(new_env_lines)

                # Add custom props/sensors
                custom_props.update(new_custom_props)
                custom_sensors.update(new_custom_sensors)

            # If we've got no responses, say we didn't understand at all.
            if not command_responses:
                command_responses.append(respond_nocommand())

            # Add responses and successes
            results.append(success)
            responses.append(' '.join(command_responses))
            print

        # We need to modify non-reaction goals to be or'd with the reactions
        if realizable_reactions and self.react_props:
            # Dedupe and make an or over all the reaction properties
            reaction_or_frag = or_([sys_(prop) for prop in self.react_props])
            # HACK: Rewrite all the goals!
            # TODO: Test again once we re-enable reaction propositions
            for command_spec_lines in self.generation_trees.values():
                for spec_lines in command_spec_lines.values():
                    spec_lines.lines = [_insert_or_before_goal(reaction_or_frag, line)
                                        for line in spec_lines.lines]

        # Aggregate all the propositions
        # Identify goal numbers as we loop over sys lines
        sys_lines = []
        # The zeroth goal is always []<>(TRUE), so we skip it.
        goal_idx = 1
        for input_text, command_spec_lines in self.generation_trees.items():
            for command, spec_lines_list in command_spec_lines.items():
                for spec_lines in spec_lines_list:
                    if not spec_lines.issys():
                        continue
                    for line in spec_lines.lines:
                        spec_lines.input = input_text
                        sys_lines.append(line)
                        if isgoal(line):
                            spec_lines.goal_indices.add(goal_idx)
                            goal_idx += 1

        # Filter out any duplicates from the env_lines
        # (an OrderedDict serves as an insertion-ordered set here)
        env_lines = OrderedDict()
        for command_spec_lines in self.generation_trees.values():
            for spec_lines_list in command_spec_lines.values():
                for spec_lines in spec_lines_list:
                    if not spec_lines.isenv():
                        continue
                    for line in spec_lines.lines:
                        env_lines[line] = None
        env_lines = env_lines.keys()

        # Convert sets to lists for the caller
        custom_props = list(custom_props)
        custom_sensors = list(custom_sensors)

        print "Spec generation complete."
        print "Results:", results
        print "Responses:", responses
        print "Environment lines:", env_lines
        print "System lines:", sys_lines
        print "Custom props:", custom_props
        print "Custom sensors:", custom_sensors
        print "Generation tree:", self.generation_trees
        return (env_lines, sys_lines, custom_props, custom_sensors, results, responses,
                self.generation_trees)
コード例 #12
0
ファイル: specgeneration.py プロジェクト: PennNLP/SLURP
    def generate(self, text, sensors, regions, props, tag_dict, realizable_reactions=True, verbose=True):
        """Generate a logical specification from natural language and propositions."""
        # Clean unicode out of everything
        text = text.encode("ascii", "ignore")
        self.sensors = [astr.encode("ascii", "ignore") for astr in sensors]
        self.regions = [astr.encode("ascii", "ignore") for astr in regions]
        self.props = [astr.encode("ascii", "ignore") for astr in props]
        self.tag_dict = {
            key.encode("ascii", "ignore"): [value.encode("ascii", "ignore") for value in values]
            for key, values in tag_dict.items()
        }

        if verbose:
            print "NL->LTL Generation called on:"
            print "Sensors:", self.sensors
            print "Props:", self.props
            print "Regions:", self.regions
            print "Tag dict:", self.tag_dict
            print "Text:", repr(text)
            print

        # Make lists for POS conversions, including the metapar keywords
        force_nouns = list(self.regions) + list(self.sensors)
        force_verbs = self.GOALS.keys()

        # Set up
        parse_client = PipelineClient()
        results = []
        responses = []
        custom_props = set()
        self.react_props = set()  # TODO: Make this a local
        custom_sensors = set()
        generation_trees = OrderedDict()

        # Add the actuator mutex
        if len(self.props) > 1:
            actuator_mutex = mutex_([sys_(prop) for prop in self.props], True)
            generation_trees["Safety assumptions"] = {
                "Safety assumptions": [
                    SpecChunk(
                        "Robot can perform only one action at a time.",
                        [actuator_mutex, always(actuator_mutex)],
                        SpecChunk.SYS,
                        None,
                    )
                ]
            }

        for line in text.split("\n"):
            # Strip the text before using it and ignore any comments
            line = line.strip()
            line = _remove_comments(line)

            if not line:
                # Blank lines are counted as being processed correctly but are skipped
                results.append(True)
                responses.append("")
                continue

            # Init the generation tree to the empty result
            generated_lines = OrderedDict()
            generation_trees[line] = generated_lines

            if verbose:
                print "Sending to remote parser:", repr(line)
            parse = parse_client.parse(line, force_nouns, force_verbs=force_verbs)
            if verbose:
                print "Response from parser:", repr(parse)
            frames, new_commands, kb_response = process_parse_tree(parse, line, self.kbase, quiet=True)

            frames, new_commands, kb_response = process_parse_tree(parse, line, self.kbase, quiet=not verbose)
            # Build the metapars
            # For now, assume success if there were commands or a kb_response
            success = bool(new_commands) or bool(kb_response)
            command_responses = [kb_response] if kb_response else []
            for command in new_commands:
                if COMMAND_DEBUG:
                    print "Processing command:"
                    print command
                try:
                    new_sys_lines, new_env_lines, new_custom_props, new_custom_sensors = self._apply_metapar(command)
                except KeyError as err:
                    cause = err.message
                    problem = "Could not understand {!r} due to error {}.".format(command.action, cause)
                    if verbose:
                        print >> sys.stderr, "Error: " + problem
                    command_responses.append(cause)
                    success = False
                    continue
                else:
                    command_responses.append(respond_okay(command.action))

                # Add in the new lines
                command_key = _format_command(command)
                if command_key not in generated_lines:
                    generated_lines[command_key] = []
                generated_lines[command_key].extend(new_sys_lines)
                generated_lines[command_key].extend(new_env_lines)

                # Add custom props/sensors
                custom_props.update(new_custom_props)
                custom_sensors.update(new_custom_sensors)

            # If we've got no responses, say we didn't understand at all.
            if not command_responses:
                command_responses.append(respond_nocommand())

            # Add responses and successes
            results.append(success)
            responses.append(" ".join(command_responses))
            # Add some space between commands
            if verbose:
                print

        if COMMAND_DEBUG:
            print "Generation trees:"
            for line, output in generation_trees.items():
                print line
                print output
                print

        # We need to modify non-reaction goals to be or'd with the reactions
        if realizable_reactions and self.react_props:
            # Dedupe and make an or over all the reaction properties
            reaction_or_frag = or_([sys_(prop) for prop in self.react_props])
            # HACK: Rewrite all the goals!
            # TODO: Test again with reaction propositions other than defuse
            for command_spec_chunks in generation_trees.values():
                for spec_chunks in command_spec_chunks.values():
                    for spec_chunk in spec_chunks:
                        if not spec_chunk.issys():
                            continue
                        spec_chunk.lines = [_insert_or_before_goal(reaction_or_frag, line) for line in spec_chunk.lines]

        # Aggregate all the propositions
        # Identify goal numbers as we loop over sys lines
        sys_lines = []
        # At the moment, there are no useless goals in specs, so we
        # begin at 0
        goal_idx = 0
        for input_text, command_spec_lines in generation_trees.items():
            for command, spec_lines_list in command_spec_lines.items():
                for spec_lines in spec_lines_list:
                    if not spec_lines.issys():
                        continue
                    for line in spec_lines.lines:
                        spec_lines.input = input_text
                        sys_lines.append(line)
                        if isgoal(line):
                            spec_lines.goal_indices.add(goal_idx)
                            goal_idx += 1

        # Filter out any duplicates from the env_lines
        env_lines = OrderedDict()
        for command_spec_lines in generation_trees.values():
            for spec_lines_list in command_spec_lines.values():
                for spec_lines in spec_lines_list:
                    if not spec_lines.isenv():
                        continue
                    for line in spec_lines.lines:
                        env_lines[line] = None
        env_lines = env_lines.keys()

        # Convert sets to lists for the caller
        custom_props = list(custom_props)
        custom_sensors = list(custom_sensors)

        if verbose:
            print "Spec generation complete."
            print "Results:", results
            print "Responses:", responses
            print "Environment lines:", env_lines
            print "System lines:", sys_lines
            print "Custom props:", custom_props
            print "Custom sensors:", custom_sensors
            print "Generation trees:", generation_trees

        return (env_lines, sys_lines, custom_props, custom_sensors, results, responses, generation_trees)
コード例 #13
0
ファイル: specgeneration.py プロジェクト: Shar-pei-bear/SLURP
    def generate(self, text, sensors, regions, props, tag_dict, realizable_reactions=True,
                 verbose=True):
        """Generate a logical specification from natural language and propositions."""
        # Clean unicode out of everything
        text = text.encode('ascii', 'ignore')
        self.sensors = [astr.encode('ascii', 'ignore') for astr in sensors]
        self.regions = [astr.encode('ascii', 'ignore') for astr in regions]
        self.props = [astr.encode('ascii', 'ignore') for astr in props]
        self.tag_dict = {key.encode('ascii', 'ignore'):
                         [value.encode('ascii', 'ignore') for value in values]
                         for key, values in tag_dict.items()}

        if verbose:
            print "NL->LTL Generation called on:"
            print "Sensors:", self.sensors
            print "Props:", self.props
            print "Regions:", self.regions
            print "Tag dict:", self.tag_dict
            print "Text:", repr(text)
            print

        # Make lists for POS conversions, including the metapar keywords
        force_nouns = list(self.regions) + list(self.sensors)
        force_verbs = self.GOALS.keys()

        # Set up
        parse_client = PipelineClient()
        results = []
        responses = []
        custom_props = set()
        self.react_props = set()  # TODO: Make this a local
        custom_sensors = set()
        generation_trees = OrderedDict()

        # Add the actuator mutex
        if len(self.props) > 1:
            actuator_mutex = mutex_([sys_(prop) for prop in self.props], True)
            generation_trees["Safety assumptions"] = \
                {"Safety assumptions":
                 [SpecChunk("Robot can perform only one action at a time.",
                            [actuator_mutex, always(actuator_mutex)],
                            SpecChunk.SYS, None)]}

        for line in text.split('\n'):
            # Strip the text before using it and ignore any comments
            line = line.strip()
            line = _remove_comments(line)

            if not line:
                # Blank lines are counted as being processed correctly but are skipped
                results.append(True)
                responses.append('')
                continue

            # Init the generation tree to the empty result
            generated_lines = OrderedDict()
            generation_trees[line] = generated_lines

            if verbose:
                print "Sending to remote parser:", repr(line)
            parse = parse_client.parse(line, force_nouns, force_verbs=force_verbs)
            if verbose:
                print "Response from parser:", repr(parse)
            frames, new_commands, kb_response = \
                process_parse_tree(parse, line, self.kbase, quiet=True)

            frames, new_commands, kb_response = process_parse_tree(parse, line, self.kbase,
                                                                   quiet=not verbose)
            # Build the metapars
            # For now, assume success if there were commands or a kb_response
            success = bool(new_commands) or bool(kb_response)
            command_responses = [kb_response] if kb_response else []
            for command in new_commands:
                if COMMAND_DEBUG:
                    print "Processing command:"
                    print command
                try:
                    new_sys_lines, new_env_lines, new_custom_props, new_custom_sensors = \
                        self._apply_metapar(command)
                except KeyError as err:
                    cause = err.message
                    problem = \
                        "Could not understand {!r} due to error {}.".format(command.action, cause)
                    if verbose:
                        print >> sys.stderr, "Error: " + problem
                    command_responses.append(cause)
                    success = False
                    continue
                else:
                    command_responses.append(respond_okay(command.action))

                # Add in the new lines
                command_key = _format_command(command)
                if command_key not in generated_lines:
                    generated_lines[command_key] = []
                generated_lines[command_key].extend(new_sys_lines)
                generated_lines[command_key].extend(new_env_lines)

                # Add custom props/sensors
                custom_props.update(new_custom_props)
                custom_sensors.update(new_custom_sensors)

            # If we've got no responses, say we didn't understand at all.
            if not command_responses:
                command_responses.append(respond_nocommand())

            # Add responses and successes
            results.append(success)
            responses.append(' '.join(command_responses))
            # Add some space between commands
            if verbose:
                print

        if COMMAND_DEBUG:
            print "Generation trees:"
            for line, output in generation_trees.items():
                print line
                print output
                print

        # We need to modify non-reaction goals to be or'd with the reactions
        if realizable_reactions and self.react_props:
            # Dedupe and make an or over all the reaction properties
            reaction_or_frag = or_([sys_(prop) for prop in self.react_props])
            # HACK: Rewrite all the goals!
            # TODO: Test again with reaction propositions other than defuse
            for command_spec_chunks in generation_trees.values():
                for spec_chunks in command_spec_chunks.values():
                    for spec_chunk in spec_chunks:
                        if not spec_chunk.issys():
                            continue
                        spec_chunk.lines = [_insert_or_before_goal(reaction_or_frag, line)
                                            for line in spec_chunk.lines]

        # Aggregate all the propositions
        # Identify goal numbers as we loop over sys lines
        sys_lines = []
        # At the moment, there are no useless goals in specs, so we
        # begin at 0
        goal_idx = 0
        for input_text, command_spec_lines in generation_trees.items():
            for command, spec_lines_list in command_spec_lines.items():
                for spec_lines in spec_lines_list:
                    if not spec_lines.issys():
                        continue
                    for line in spec_lines.lines:
                        spec_lines.input = input_text
                        sys_lines.append(line)
                        if isgoal(line):
                            spec_lines.goal_indices.add(goal_idx)
                            goal_idx += 1

        # Filter out any duplicates from the env_lines
        env_lines = OrderedDict()
        for command_spec_lines in generation_trees.values():
            for spec_lines_list in command_spec_lines.values():
                for spec_lines in spec_lines_list:
                    if not spec_lines.isenv():
                        continue
                    for line in spec_lines.lines:
                        env_lines[line] = None
        env_lines = env_lines.keys()

        # Convert sets to lists for the caller
        custom_props = list(custom_props)
        custom_sensors = list(custom_sensors)

        if verbose:
            print "Spec generation complete."
            print "Results:", results
            print "Responses:", responses
            print "Environment lines:", env_lines
            print "System lines:", sys_lines
            print "Custom props:", custom_props
            print "Custom sensors:", custom_sensors
            print "Generation trees:", generation_trees

        return (env_lines, sys_lines, custom_props, custom_sensors, results, responses,
                generation_trees)