Example #1
    def get_single_tptp_file(self, imports=None):
        """translate the module and all imported modules to a single TPTP file."""

        # if the given imports are identical to the module's imports, treat it as if the module's imports were used
        if imports and set(self.imports) == set(imports):
            imports = None

        ending = ""

        # avoid redundant work if we already have the tptp file and didn't add a lemma module
        if not imports:
            if len(self.tptp_file_name) > 0 and self.lemma_module is None:
                return self.tptp_file_name
            ending = filemgt.read_config('tptp', 'all_ending')
            name = self.module_name
        else:
            ending = filemgt.read_config('tptp', 'select_ending')
            name = imports[0].module_name
        # construct the final ending
        ending += filemgt.read_config('tptp', 'ending')

        tptp_file_name = filemgt.get_full_path(name,
                                               folder=filemgt.read_config(
                                                   'tptp', 'folder'),
                                               ending=ending)

        if not imports:
            self.tptp_file_name = tptp_file_name
            imports = self.get_imports().copy()

        tptp_sentences = []
        if self.lemma_module:
            imports.remove(self.lemma_module)
            tptp_sentences.append(self.lemma_module.tptp_sentence)

        files_to_translate = [i.clif_processed_file_name for i in imports
                              if i.clif_processed_file_name is not None]
        tptp_sentences.extend(
            clif.translate_sentences(files_to_translate, "TPTP"))
        with open(tptp_file_name, 'w') as tptp_file:
            tptp_file.writelines([t + "\n" for t in tptp_sentences])

        logging.getLogger(__name__).info("CREATED TPTP TRANSLATION: " +
                                         tptp_file_name)

        return tptp_file_name
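
A minimal usage sketch, assuming the method lives on a `ClifModuleSet`-style class as in the later examples; the import path and input file name are assumptions:

from macleod.ClifModuleSet import ClifModuleSet  # assumed import path

# Translate a module and its full import closure to a single TPTP file;
# calling with no imports argument caches the result in m.tptp_file_name.
m = ClifModuleSet("ontology/my_theory.clif")     # hypothetical input file
tptp_path = m.get_single_tptp_file()
print("TPTP translation written to", tptp_path)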
Example #2
    def __init__(self, name, reasoner_type=None, reasoner_id=None):
        # default values, overridden below where arguments or config entries exist
        self.identifier = ''
        self.type = Reasoner.PROVER
        self.args = []
        self.positive_returncodes = []
        self.unknown_returncodes = []
        self.modules = []
        self.input_files = ''
        self.output_file = ''
        self.time = -1
        self.return_code = None
        self.output = None

        self.name = name
        if reasoner_type:
            self.type = reasoner_type
        self.identifier = reasoner_id if reasoner_id else name
        self.positive_returncodes = commands.get_positive_returncodes(self.name)
        self.unknown_returncodes = commands.get_unknown_returncodes(self.name)

        self.timeout = filemgt.read_config(self.name, 'timeout')
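
A short sketch of how the constructor's defaults play out; the reasoner names are assumptions, and `Reasoner.MODEL_FINDER` is taken from Example #5:

# identifier defaults to the name, type defaults to Reasoner.PROVER
p9 = Reasoner("prover9")                         # assumed configured prover name

# a model finder with an explicit identifier
mace = Reasoner("mace4",                         # assumed configured finder name
                reasoner_type=Reasoner.MODEL_FINDER,
                reasoner_id="mace4-default")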
Example #3
    def add_lemmas(self):

        # first get the LADR translation of the ClifModule
        (lemma_names,
         ladr_files) = ladr.get_ladr_goal_files(self.module.get_p9_file_name(),
                                                self.module.module_name)
        logging.getLogger(
            __name__).debug("CREATED LADR TRANSLATION OF LEMMA: " +
                            self.module.get_p9_file_name())

        # translate to TPTP as goals (not axioms)
        tptp_sentences = clif.to_tptp([self.module.clif_processed_file_name],
                                      axiom=False)

        # populate the list of lemmas: one LemmaModule per LADR goal file,
        # named after the goal file with the LADR ending stripped
        ladr_ending = filemgt.read_config('ladr', 'ending')
        for lemma_name, ladr_file, tptp_sentence in zip(
                lemma_names, ladr_files, tptp_sentences):
            name = os.path.join(
                os.path.dirname(lemma_name),
                os.path.basename(ladr_file.replace(ladr_ending, '')))
            self.lemmas.append(LemmaModule(name, ladr_file, tptp_sentence))
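
The name construction strips the configured LADR ending from each goal file and re-roots it in the lemma directory; a standalone sketch with assumed values:

import os

lemma_name = "output/lemmas/theory_lemma1"       # assumed entry of lemma_names
ladr_file = "output/goals/theory_lemma1.p9"      # assumed entry of ladr_files
ladr_ending = ".p9"                              # assumed ('ladr', 'ending') value

name = os.path.join(os.path.dirname(lemma_name),
                    os.path.basename(ladr_file.replace(ladr_ending, '')))
print(name)  # output/lemmas/theory_lemma1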
Example #4
    def constructCommand(self, modules, outfile_stem):
        """Construct the command to invoke the reasoner."""
        self.modules = modules
        self.output_file = outfile_stem + filemgt.read_config(
            self.name, 'ending')
        (self.args, self.input_files) = commands.get_system_command(
            self.name, modules, self.output_file)
        return self.args
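
A hedged call sketch; the module list and output stem are assumptions:

# assumed: modules is a list of ClifModule-like objects
r = Reasoner("prover9")                          # as in Example #2
args = r.constructCommand(modules, "output/my_theory")
# r.output_file is now the stem plus the configured ending, and
# r.args / r.input_files hold what the subprocess layer needs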
Example #5
    def detect_systems(self):
        """Read the active provers from the configuration file."""

        # read the comma-separated reasoner names from the configuration,
        # dropping surrounding whitespace and empty entries
        provers = filemgt.read_config('active', 'provers').split(',')
        finders = filemgt.read_config('active', 'modelfinders').split(',')

        provers = [s.strip() for s in provers if s.strip()]
        finders = [s.strip() for s in finders if s.strip()]

        self.extend([Reasoner(r) for r in provers])
        self.extend([
            Reasoner(r, reasoner_type=Reasoner.MODEL_FINDER) for r in finders
        ])

        logging.getLogger(__name__).debug("REASONER SET: " +
                                          str(provers + finders))

        return True
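
`detect_systems` expects comma-separated reasoner names under an `[active]` section; a standalone sketch of the same parsing, where only the section and option names come from the code and the concrete reasoner names are assumptions:

import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[active]
provers = prover9, vampire
modelfinders = mace4
""")

provers = [s.strip() for s in cfg.get('active', 'provers').split(',') if s.strip()]
print(provers)  # ['prover9', 'vampire']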
Example #6
    def get_single_ladr_file(self, imports=None):
        """get the ClifModuleSet as a single file in LADR syntax."""

        # if the given imports are identical to the module's imports, treat it as if the module's imports were used
        if imports and set(self.imports) == set(imports):
            imports = None

        # avoid redundant work if we already have the ladr file
        if not imports and len(self.p9_file_name) > 0:
            return self.p9_file_name

        ending = ""
        if not imports:
            ending = filemgt.read_config('ladr', 'all_ending')
            name = self.module_name
        else:
            ending = filemgt.read_config('ladr', 'select_ending')
            name = imports[0].module_name
        # construct the final ending
        ending += filemgt.read_config('ladr', 'ending')

        p9_files = self.get_ladr_files(imports)

        p9_file_name = filemgt.get_full_path(name,
                                             folder=filemgt.read_config(
                                                 'ladr', 'folder'),
                                             ending=ending)
        if not imports:
            self.p9_file_name = p9_file_name

        #print "FILE NAME:" + self.p9_file_name
        # TODO: need to initialize self.replaceable_symbols
        ladr.cumulate_ladr_files(p9_files, p9_file_name)
        logging.getLogger(__name__).info("CREATED SINGLE LADR TRANSLATION: " +
                                         p9_file_name)
        return p9_file_name
Example #7
    def extract_p9_predicates_and_functions(self):
        """Extract the predicate and function symbols from prover9's symbol-order output."""
        prover9args = 'prover9 -t 0 -f '

        for f in self.imports:
            prover9args += f.p9_file_name + ' '

        options_file = commands.get_p9_empty_optionsfile(
            self.get_p9_file_name(), verbose=False)
        prover9args += ' ' + options_file + ' '

        # would be better to create a temporary file or read the output stream directly
        temp_file = self.get_module_name() + '_order' + filemgt.read_config(
            'ladr', 'ending')
        prover9args += ' > ' + temp_file
        logging.getLogger(__name__).debug(prover9args)
        process.executeSubprocess(prover9args)

        predicates = None
        functions = None
        with open(temp_file, 'r') as order_file:
            line = order_file.readline()
            while line:
                if line.find('predicate_order') > -1:
                    predicateline = line[line.find('predicate_order([') +
                                         len('predicate_order([') + 1:-4]
                    predicates = [p.replace(',', '')
                                  for p in predicateline.split()]
                    # the function order is expected on the next line
                    line = order_file.readline()
                    functionline = line[line.find('function_order([') +
                                        len('function_order([') + 1:-4]
                    functions = [f.replace(',', '')
                                 for f in functionline.split()]
                    break
                line = order_file.readline()

        os.remove(temp_file)
        os.remove(options_file)
        if predicates and functions:
            return (predicates, functions)
        else:
            return ([], [])
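
The slicing assumes prover9 prints the symbol order as `predicate_order([ ... ]).`; a standalone demonstration on a made-up line (the exact output format is an assumption):

line = "predicate_order([ =, P, between ]).\n"   # assumed prover9 output line
prefix = 'predicate_order(['
inner = line[line.find(prefix) + len(prefix) + 1:-4]
predicates = [p.replace(',', '') for p in inner.split()]
print(predicates)  # ['=', 'P', 'between']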
Example #8
    def run_module_consistency_check(self, module):
        """check a single module for consistency."""
        outfile_stem = filemgt.get_full_path(module.module_name,
                                             folder=filemgt.read_config(
                                                 'output', 'folder'))

        reasoners = ReasonerSet()
        reasoners.constructAllCommands([module], outfile_stem)
        logging.getLogger(__name__).info("USING " + str(len(reasoners)) +
                                         " REASONERS: " +
                                         str([r.name for r in reasoners]))

        # run provers and modelfinders simultaneously and wait until one returns
        reasoners = process.raceProcesses(reasoners)

        (return_value, _) = self.consolidate_results(reasoners)
        self.pretty_print_result(module.module_name, return_value)

        return return_value
Example #9
    def run_simple_consistency_check(self,
                                     module_name=None,
                                     modules=None,
                                     options_files=None):
        """ test the input for consistency by trying to find a model or an inconsistency."""
        # want to create a subfolder for the output files
        outfile_stem = filemgt.get_full_path(self.module_name,
                                             folder=filemgt.read_config(
                                                 'output', 'folder'))

        if not module_name:
            module_name = self.module_name

        if not modules:
            modules = self.imports  # use all imports as default set of modules

        reasoners = ReasonerSet()
        reasoners.constructAllCommands(modules, outfile_stem)
        logging.getLogger(__name__).info("USING " + str(len(reasoners)) +
                                         " REASONERS: " +
                                         str([r.name for r in reasoners]))

        # run provers and modelfinders simultaneously and wait until one returns
        reasoners = process.raceProcesses(reasoners)

        # this captures our return code (consistent/inconsistent/unknown), not the reasoning process's return code
        (return_value, fastest_reasoner) = self.consolidate_results(reasoners)

        if len(modules) == 0:
            self.pretty_print_result(module_name + " (without imports)",
                                     return_value)
        else:
            self.pretty_print_result(
                module_name + " (with imports = " + str(modules) + ")",
                return_value)

        results = [(tuple(modules), return_value, fastest_reasoner)]
        return results
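
An end-to-end sketch mirroring the assumptions from Example #1:

m = ClifModuleSet("ontology/my_theory.clif")     # hypothetical input file

# race all configured provers and model finders on the full import closure
results = m.run_simple_consistency_check()
for (modules, verdict, reasoner) in results:
    # reasoner may be None if no prover or finder reached a verdict
    print(verdict, "from", reasoner.name if reasoner else "no reasoner")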