def ilp_hedwig(input_dict):
    """Run Hedwig subgroup discovery in 'subgroups' mode on widget inputs.

    Writes the examples and each background-knowledge (BK) blob to temp
    files (Hedwig reads them back by path), invokes ``hedwig.run`` with
    options translated from ``input_dict``, and returns the rules text.

    :param input_dict: widget inputs; expects keys 'format', 'examples',
        'bk_file' (iterable of BK file contents), 'score', 'negations',
        'alpha', 'adjust', 'fdr', 'leaves', 'optimalsubclass', 'uris',
        'beam', 'support', 'depth', and optionally 'target'.
    :return: dict with a single key 'rules' holding the rules text.
    """
    import hedwig

    fmt = input_dict['format']
    data_suffix = '.' + fmt
    # Hedwig expects background-knowledge files for 'csv' data as .tsv.
    bk_suffix = '.tsv' if fmt == 'csv' else data_suffix

    # Write the examples file. delete=False so Hedwig can reopen it by name.
    # BUGFIX: was suffix=fmt (no dot), producing names like 'tmpXXXXcsv'
    # instead of 'tmpXXXX.csv'.
    data_file = tempfile.NamedTemporaryFile(delete=False, suffix=data_suffix)
    data_file.write(input_dict['examples'])
    data_file.close()

    # Write each BK blob into its own file inside a fresh directory.
    bk_dir = tempfile.mkdtemp()
    for bk_file in input_dict['bk_file']:
        tmp_bk_file = tempfile.NamedTemporaryFile(delete=False, dir=bk_dir,
                                                  suffix=bk_suffix)
        tmp_bk_file.write(bk_file)
        tmp_bk_file.close()

    # Close the handle before Hedwig writes to the path (required on
    # platforms where an open NamedTemporaryFile cannot be reopened).
    output_file = tempfile.NamedTemporaryFile(delete=False)
    output_file.close()

    hedwig.run({
        'bk_dir': bk_dir,
        'data': data_file.name,
        'format': fmt,
        'output': output_file.name,
        'mode': 'subgroups',
        'target': input_dict.get('target'),
        'score': input_dict['score'],
        'negations': input_dict['negations'] == 'true',
        'alpha': float(input_dict['alpha']),
        'adjust': input_dict['adjust'],
        'FDR': float(input_dict['fdr']),
        'leaves': input_dict['leaves'] == 'true',
        'learner': 'heuristic',
        'optimalsubclass': input_dict['optimalsubclass'] == 'true',
        'uris': input_dict['uris'] == 'true',
        'beam': int(input_dict['beam']),
        'support': float(input_dict['support']),
        'depth': int(input_dict['depth']),
        'nocache': True,
        'covered': None
    })

    # Read the rules Hedwig wrote; 'with' closes the fd (was leaked before).
    with open(output_file.name) as rules_file:
        rules = rules_file.read()
    return {'rules': rules}
def ilp_hedwig(input_dict):
    """Invoke Hedwig subgroup discovery on widget inputs.

    Materializes the examples and every background-knowledge ('bk') blob
    as temp files for Hedwig to read, runs it with options mapped from
    ``input_dict`` plus fixed presets, and returns the result.

    :param input_dict: widget inputs; expects keys 'format', 'examples',
        'bk', 'adjust', 'fdr', 'sup', 'learner', 'depth', 'optimal',
        'beam', 'alpha', 'score_fun', 'uris', 'negations'.
    :return: dict with a single key 'rules' holding ``hedwig.run``'s result.
    """
    import hedwig

    data_format = input_dict['format']
    # Background-knowledge files for 'csv' data must carry a .tsv suffix.
    if data_format == 'csv':
        ontology_suffix = '.tsv'
    else:
        ontology_suffix = '.' + data_format

    # delete=False: Hedwig reopens these by path after we close them.
    examples = tempfile.NamedTemporaryFile(suffix='.' + data_format,
                                           delete=False)
    examples.write(input_dict['examples'])
    examples.close()

    ontology_dir = tempfile.mkdtemp()
    for ontology_blob in input_dict['bk']:
        ontology_file = tempfile.NamedTemporaryFile(suffix=ontology_suffix,
                                                    delete=False,
                                                    dir=ontology_dir)
        ontology_file.write(ontology_blob)
        ontology_file.close()

    result = hedwig.run({
        'data': examples.name,
        'bk_dir': ontology_dir,
        'adjust': input_dict['adjust'],
        'FDR': float(input_dict['fdr']),
        'format': data_format,
        'support': float(input_dict['sup']),
        'learner': input_dict['learner'],
        'depth': int(input_dict['depth']),
        'optimalsubclass': input_dict['optimal'] == "true",
        'beam': int(input_dict['beam']),
        'alpha': float(input_dict['alpha']),
        'score': input_dict['score_fun'],
        'uris': input_dict['uris'],
        'negations': input_dict['negations'] == "true",
        # Presets
        'leaves': True,
        'covered': None,
        'target': None,
        'mode': 'subgroups',
        'output': 'foo.txt',
        'verbose': False,
        'nocache': True
    })
    return {'rules': result}
# Search-control options: beam width, minimum support, and rule depth.
parser.add_argument('-b', '--beam', type=int, default=Defaults.BEAM_SIZE,
                    help='Beam size.')
parser.add_argument('-S', '--support', type=float, default=Defaults.SUPPORT,
                    help='Minimum support.')
parser.add_argument('-d', '--depth', type=int, default=Defaults.DEPTH,
                    help='Maximum number of conjunctions.')
# Operational flags.
parser.add_argument('-C', '--nocache', action='store_true',
                    help="Don't cache background knowledge graph files.")
parser.add_argument('-v', '--verbose', action='store_true',
                    help='Increase output verbosity.')

cli_args = parser.parse_args()
# Hedwig takes a plain options dict; cli=True enables CLI-mode behavior.
hedwig.run(vars(cli_args), cli=True)
# Tail of an add_argument call begun above this chunk (presumably the
# FDR option; verify against the preceding lines).
help='Max false discovery rate; applies only if \ "--adjust fdr" is used.', )
# Rule-shape options.
parser.add_argument("-l", "--leaves", action="store_true", help="Use instance names in rule conjunctions.")
parser.add_argument( "-L", "--learner", choices=["heuristic", "optimal"], default="heuristic", help="Type of learner to use." )
parser.add_argument( "-O", "--optimalsubclass", action="store_true", help="In each step the full hierarchy under a particular \ concept is searched", )
parser.add_argument("-u", "--uris", action="store_true", help="Show URIs in rule conjunctions.")
# Search-control options. NOTE(review): string defaults are fine here —
# argparse applies type= to string defaults before use.
parser.add_argument("-b", "--beam", default="20", type=int, help="Beam size.")
parser.add_argument("-S", "--support", default="0.1", type=float, help="Minimum support.")
parser.add_argument("-d", "--depth", default="5", type=int, help="Maximum number of conjunctions.")
parser.add_argument("-v", "--verbose", help="Increase output verbosity.", action="store_true")
# Parse the command line and hand the options dict to Hedwig in CLI mode.
args = parser.parse_args()
hedwig.run(args.__dict__, cli=True)