def call_apply_link_async_in_schema(self, times): scheme_eval(self.atomspace, ''' (use-modules (opencog) (opencog exec)) (define (async-call f n) (call-with-new-thread f) (if (> n 1) (async-call f (- n 1)) )) (async-call (lambda () (begin (define apply-link (ApplyLink (MethodOfLink (GroundedObjectNode "point") (ConceptNode "move")) (ListLink (GroundedObjectNode "x") (GroundedObjectNode "y")))) (cog-execute! apply-link) )) %d) (usleep 100) ''' % times)
def pln_bc(self, query, vardecl=None, maxiter=10, rules=None):
    """Call PLN backward chainer with the given query and parameters.

    :param query: the target atom to backward-chain on.
    :param vardecl: optional variable declaration for the query.
    :param maxiter: the maximum number of iterations.
    :param rules: optional list of rule symbols (must have been loaded
        beforehand).  If None or empty, keep the current rule set.

    :return: a python list of solutions.
    """
    agent_log.fine("pln_bc(query={}, maxiter={})".format(query, maxiter))

    # Fix: the original used a mutable default argument (rules=[]),
    # which is shared across calls; None is the safe sentinel and
    # preserves the "empty means keep current rules" behavior.
    if rules:
        # Replace the whole current rule base with the requested rules.
        scheme_eval(self.atomspace, "(pln-rm-all-rules)")
        for rule in rules:
            er = scheme_eval(self.atomspace, "(pln-add-rule '" + rule + ")")
            agent_log.info("(pln-add-rule '" + rule + ")")
            agent_log.info("er = " + str(er))

    # Generate and run query.
    # NOTE(review): there is no explicit space between str(query) and
    # "#:vardecl" — this presumably relies on the atom's string
    # representation ending with a newline; confirm against scheme_eval
    # output format before reordering.
    command = "(pln-bc "
    command += str(query)
    command += ("#:vardecl " + str(vardecl)) if vardecl else ""
    command += " #:maximum-iterations " + str(maxiter)
    command += ")"
    return scheme_eval_h(self.atomspace, command).out
def setUp(self):
    """Bind self.space to the shared atomspace and extend guile's load path.

    Adds both the project root and its opencog/scm subdirectory so that
    subsequent (load-from-path ...) calls can resolve test fixtures.
    """
    global shared_space
    self.space = shared_space
    project_root = os.environ['PROJECT_SOURCE_DIR']
    for subdir in ("", "/opencog/scm"):
        scheme_eval(
            self.space,
            '(add-to-load-path "' + project_root + subdir + '")')
def test_bogus_scheme(self):
    """Evaluating malformed scheme must raise a RuntimeError mentioning
    the parser's "Expecting" diagnostic."""
    code = """(Get (Concept "a") (Concept "a") (Concept "a"))"""
    try:
        scheme_eval(self.space, code)
        # Fix: the original used self.assertFalse("call should fail"),
        # which does fail (non-empty string is truthy) but with a
        # confusing message; self.fail() is the idiomatic way to flag
        # "we should never get here".
        self.fail("call should fail")
    except RuntimeError as e:
        # Use `nosetests3 --nocapture` to see this print...
        print("The exception message is " + str(e))
        # assertIn gives a clearer failure message than assertTrue(in).
        self.assertIn("Expecting", str(e))
def test_bogus_path(self):
    """Loading a nonexistent file must raise a RuntimeError mentioning
    the "Unable to find" diagnostic."""
    code = """(load-from-path "/blargle/Betelgeuse")"""
    try:
        scheme_eval(self.space, code)
        # Fix: self.assertFalse("call should fail") was a truthy-string
        # anti-idiom; self.fail() states the intent directly.
        self.fail("call should fail")
    except RuntimeError as e:
        # Use `nosetests3 --nocapture` to see this print...
        print("The exception message is " + str(e))
        # assertIn gives a clearer failure message than assertTrue(in).
        self.assertIn("Unable to find", str(e))
def test_unifier(self):
    """Resolve and execute the find-animals query; its answer must be a
    SetLink of arity 3."""
    scheme_eval(self.space, "(use-modules (opencog exec))")

    # The query atom itself must be defined and resolvable.
    query_atom = scheme_eval_h(self.space, "find-animals")
    self.assertTrue(query_atom)
    print("\nThe question is:", query_atom)

    # Executing the query must produce a non-empty result set.
    result = scheme_eval_h(self.space, "(cog-execute! find-animals)")
    self.assertTrue(result)
    print("\nThe answer is:", result)
    self.assertEqual(result.type, types.SetLink)
    self.assertEqual(result.arity, 3)
def pln_load_rules(self, rules=None):
    """Load PLN rules.

    Take a list of rule scheme symbols (but without the single quote
    for the symbol), such as

    ["back-predictive-implication-scope-direct-evaluation",
     "back-predictive-implication-scope-deduction-cogscm"]

    :param rules: iterable of rule symbol names; None (the default)
        loads nothing.  Fix: the original default was a shared mutable
        list (rules=[]); None is the safe, backward-compatible sentinel.
    """
    for rule in rules or ():
        scheme_eval(self.atomspace, "(pln-load-rule '" + rule + ")")
def load_opencog_modules(self):
    """Load the miner and PLN guile modules and the PLN rule this agent uses.

    The commands are evaluated in order; commented-out entries are kept
    for reference, matching the original.
    """
    setup_commands = [
        # Load miner
        "(use-modules (opencog miner))",
        "(miner-logger-set-level! \"fine\")",
        # "(miner-logger-set-sync! #t)",
        # Load PLN
        "(use-modules (opencog pln))",
        # "(pln-load-rule 'predictive-implication-scope-direct-introduction)",
        "(pln-load-rule 'predictive-implication-scope-direct-evaluation)",
        # No need of predictive implication for now
        # "(pln-load-rule 'predictive-implication-direct-evaluation)",
        "(pln-log-atomspace)",
    ]
    for cmd in setup_commands:
        scheme_eval(self.atomspace, cmd)
def start_annotation(**kwargs):
    """Run the annotation pipeline for one request.

    Expects kwargs["mnemonic"] (session id, also used as the result
    directory name) and kwargs["payload"] with "annotations" and
    "genes" entries.

    :return: (True, None) on success, or (False, payload) where payload
        is either a JSON list of not-found gene records or an error
        message string.
    """
    logger = logging.getLogger("annotation-service")
    try:
        mnemonic = kwargs["mnemonic"]
        path = os.path.join(RESULT_DIR, mnemonic)
        if not os.path.exists(path):
            os.makedirs(path)
        response = annotate(atomspace, kwargs["payload"]["annotations"],
                            kwargs["payload"]["genes"], mnemonic)
        if "#f" in response:
            # Scheme signalled failure; the tail of the response lists
            # the gene symbols that could not be found.
            # NOTE(review): the [4:] offset assumes a fixed-width
            # failure prefix in the scheme response — confirm against
            # the annotate-genes output format.
            not_found = response[4:].split(" ")
            res = [{"symbol": n, "current": "", "similar": ""}
                   for n in not_found]
            return False, json.dumps(res)
        else:
            logger.info("when executing atoms:" +
                        scheme_eval(atomspace, "(count-all)").decode("utf-8"))
            json_file = os.path.join(path, mnemonic + ".json")
            logger.info("Applying Multi-level Layout")
            graph_processor = GraphProcessor(json_file)
            graph_processor.process()
            csv_file = to_csv(mnemonic)
            logger.info(csv_file)
            return True, None
    except Exception as ex:
        msg = "Error: " + str(ex)
        logger.error(msg)
        # Fix: the original printed traceback._cause_message, a private
        # module constant ("The above exception was the direct cause
        # ..."), not the actual traceback.  print_exc() emits the real
        # stack trace of the exception being handled.
        traceback.print_exc()
        return False, msg
def find_interactions(node, atomspace):
    # Finds and returns interactions of the given bio-entity with others, the type of interaction
    # and their count.
    #
    # For each predicate in a fixed list of interaction types, the scheme
    # query matches the predicate applied to (node, $n), ($n, node), or the
    # unordered {node, $n}, and joins the partner names with commas.  Each
    # non-empty match contributes a "predicate:partner1,partner2,..." entry;
    # entries are space-joined by scheme, then re-joined with newlines here.
    interaction_result = \
        '''
        (let ([interactions (list "expresses" "interacts_with" "binding" "reaction"
                "inhibition" "activation" "expression" "catalysis" "ptmod"
                "GO_regulates" "GO_positively_regulates" "GO_negatively_regulates"
                "has_part" "has_role" "translated_to" "transcribed_to" )]
              [node {}])
        (string-join (map (lambda (i)
            (let ([result (cog-outgoing-set (cog-execute! (GetLink
                (ChoiceLink
                    (Evaluation (PredicateNode i) (ListLink node (Variable "$n")))
                    (Evaluation (PredicateNode i) (ListLink (Variable "$n") node))
                    (Evaluation (PredicateNode i) (SetLink node (Variable "$n")))))))])
                (if (null? result) ""
                    (string-join (list i (string-join
                        (map (lambda (x) (cog-name x))result) ",")) ":")))
            )interactions) " "))
        '''.format(node)
    interaction_result = scheme_eval(atomspace, interaction_result).decode("utf-8")
    # NOTE(review): split() collapses ALL whitespace, so this assumes no
    # partner name contains spaces — each whitespace-separated token
    # becomes one "pred:partners" line.  Confirm against the data set.
    interaction_result = "\n".join(interaction_result.split())
    count = {}
    if interaction_result:
        for i in interaction_result.split("\n"):
            interaction = i.split(":")
            # Count the comma-separated partners for this predicate.
            # NOTE(review): assumes every line contains a ":"; a bare
            # token would raise IndexError here.
            count[interaction[0]] = len(interaction[1].split(","))
    return interaction_result, count
def check_gene_availability(atomspace, genes):
    """Query the atomspace for the given genes.

    Returns a pair: the raw JSON result string from (find-genes ...)
    and a flag that is True when the decoded result is empty (i.e. no
    genes were found).
    """
    logger = logging.getLogger("annotation-service")
    genes = generate_gene_function(genes)
    logger.info("checking genes : " + genes)
    logger.info(genes)
    gene_result = scheme_eval(
        atomspace,
        "(find-genes {gene_list})".format(gene_list=genes)).decode('utf-8')
    no_genes_found = len(json.loads(gene_result)) == 0
    return gene_result, no_genes_found
def find_locations(node, atomspace):
    """Return the cellular locations of the given bio-entity.

    Queries (has_location node $loc) and returns the comma-joined
    location names, or None when no location link exists.
    """
    query = \
        '''
        (let ([loc (cog-outgoing-set (cog-execute! (GetLink
            (Evaluation (PredicateNode "has_location")
                (ListLink {} (Variable "$loc"))))))])
        (if (null? loc) ""
            (string-join (map (lambda (l) (cog-name l)) loc) ",")))
        '''.format(node)
    found = scheme_eval(atomspace, query).decode("utf-8")
    # Empty string means the query matched nothing.
    return found or None
def find_txt_name(node, atomspace):
    """Return the textual name of the given bio-entity, or None.

    Queries (has_name node $name) and returns the first match's name.
    Results of length <= 1 are treated as "no usable name", matching
    the original behavior.
    """
    query = \
        '''
        (let ([name (cog-outgoing-set (cog-execute! (GetLink
            (Evaluation (PredicateNode "has_name")
                (ListLink {} (Variable "$name"))))))])
        (if (null? name) ""
            (cog-name (car name))))
        '''.format(node)
    resolved = scheme_eval(atomspace, query).decode("utf-8")
    if len(resolved) > 1:
        return resolved
    return None
def test_c_gc(self):
    """Stress guile's garbage collector from python.

    Evaluates three expressions in order — a counter definition, a loop
    that allocates and reverses 2000 large strings, and a (gc-stats)
    probe — asserting each evaluation returns a truthy status.
    """
    print("Enter garbage-collection-test\n")
    expressions = (
        '(define n 0)',
        """
        (for-each
            (lambda (y)
                (let* ((bigstr (list->string (map
                        (lambda (x)
                            (integer->char (+ 48 (modulo (+ x y) 79))))
                        (iota 900))))
                    (biglst (string->list bigstr))
                    (revstr (reverse-list->string biglst)))
                    (set! n (+ 1 n))))
            (iota 2000))""",
        '(gc-stats)',
    )
    for expr in expressions:
        self.assertTrue(scheme_eval(self.space, expr))
    print("Finish garbage-collection-test\n")
def load_opencog_modules(self):
    """Configure loggers, then load the miner and PLN guile modules.

    Logger levels are set first (debug for the opencog and agent logs,
    info for the URE log); commented-out sync toggles and rules are
    kept for reference, matching the original.
    """
    # Init loggers
    log.set_level("debug")
    # log.set_sync(True)
    agent_log.set_level("debug")
    # agent_log.set_sync(True)
    ure_logger().set_level("info")
    # ure_logger().set_sync(True)

    for scm_cmd in (
            # Load miner
            "(use-modules (opencog miner))",
            "(miner-logger-set-level! \"fine\")",
            # "(miner-logger-set-sync! #t)",
            # Load PLN
            "(use-modules (opencog pln))",
            # "(pln-load-rule 'predictive-implication-scope-direct-introduction)",
            "(pln-load-rule 'predictive-implication-scope-direct-evaluation)",
            # No need of predictive implication for now
            # "(pln-load-rule 'predictive-implication-direct-evaluation)",
            "(pln-log-atomspace)",
    ):
        scheme_eval(self.atomspace, scm_cmd)
def test_b_load_file(self):
    """Load a scheme fixture file and verify its side effects.

    basic_unify.scm is expected to create (ConceptNode "hello") with
    truth value (0.5, 0.5); adding the same node here must return the
    existing atom carrying that TV.
    """
    print("Enter load-file test\n")
    status = scheme_eval(
        self.space,
        '(load-from-path "tests/cython/guile/basic_unify.scm")')
    self.assertTrue(status)
    print("Loaded file\n")

    a1 = self.space.add_node(types.ConceptNode, "hello")
    self.assertTrue(a1)
    print("Added atom\n")

    # Make sure the truth value is what's in the SCM file.
    expected = TruthValue(0.5, 0.5)
    # Fix: assertEquals is a deprecated alias (removed in Python 3.12);
    # assertEqual is the supported spelling.
    self.assertEqual(a1.tv, expected)
    print("Got=" + str(a1.tv) + " expected=" + str(expected))
def load_opencog_modules(self):
    """Load miner, PLN, and spacetime guile modules and pre-load PLN rules.

    All PLN rules this agent can ever use must be loaded here, before
    the backward chainer runs.
    """
    # Load miner
    scheme_eval(self.atomspace, "(use-modules (opencog miner))")

    # Load PLN. All rules must be pre-loaded here
    for module in ("(use-modules (opencog pln))",
                   "(use-modules (opencog spacetime))"):
        scheme_eval(self.atomspace, module)
    self.pln_load_rules([
        "back-predictive-implication-scope-direct-evaluation",
        "back-predictive-implication-scope-deduction-cogscm",
    ])
def annotate(atomspace, annotations, genes, mnemonic):
    """Perform the requested annotations on a list of genes.

    :param atomspace: the atomspace holding the loaded knowledge bases
        the annotations are performed against
    :param annotations: a list of annotations
    :param genes: a list of genes
    :param mnemonic: the session identifier passed through to scheme
    :return: the raw scheme_eval response decoded as utf-8
    """
    logger = logging.getLogger("annotation-service")
    logger.info(annotations)

    # The annotation request is embedded as an escaped JSON string
    # inside the scheme call.
    escaped_request = json.dumps(annotations).replace('"', '\\"')
    parse_function = "(annotate-genes {genes} \"{session}\" \"{request}\")".format(
        genes=generate_gene_function(genes),
        request=escaped_request,
        session=mnemonic)
    logger.info(parse_function)

    response = scheme_eval(atomspace, parse_function).decode("utf-8")
    logger.info("Finished annotation")
    return response
def set_sync(self, sync: bool) -> None:
    """Toggle synchronous flushing of the miner logger."""
    scheme_eval(self.atomspace,
                "(miner-logger-set-sync! " + to_scheme_str(sync) + ")")
def set_level(self, level: str) -> None:
    """Set the miner logger verbosity (e.g. "fine", "debug", "info")."""
    scheme_eval(self.atomspace,
                '(miner-logger-set-level! "' + level + '")')
def __init__(self, atomspace: AtomSpace):
    """Wrap the given atomspace and ensure the miner guile module is loaded."""
    self.atomspace = atomspace
    scheme_eval(atomspace, "(use-modules (opencog miner))")
atomspace = AtomSpace()
initialize_opencog(atomspace)

# Set to True by add_link so callers can verify the grounded schema
# was actually invoked from scheme.
executed = False


def add_link(atom1, atom2):
    """Grounded python schema: build a ListLink of the two atoms.

    Referenced from scheme as (GroundedSchemaNode "py: add_link"), so
    this name must stay at module level.  Records the invocation in the
    module-level `executed` flag and returns the created link.
    """
    global executed
    link = ListLink(atom1, atom2)
    executed = True
    return link


# Module for cog-execute!
scheme_eval(atomspace, '(use-modules (opencog exec))')

# Execute add_link from scheme: cog-execute! resolves the grounded
# schema to the python function above and applies it to the two
# ConceptNodes.
execute_code = \
    '''
    (cog-execute!
        (ExecutionOutputLink
            (GroundedSchemaNode \"py: add_link\")
            (ListLink
                (ConceptNode \"one\")
                (ConceptNode \"two\")
            )
        )
    )
    '''
scheme_eval(atomspace, execute_code)
def test_a_load_core_types(self):
    """Smoke test: loading the core (opencog) guile module must not raise."""
    scheme_eval(self.space, "(use-modules (opencog))")
def scm(atomese):
    """Evaluate `atomese` in the module-level atomspace; return utf-8 text."""
    raw = scheme_eval(atomspace, atomese)
    return raw.decode("utf-8")
def to_csv(file_dir, main_nodes=False):
    """Convert an annotation result (result.scm) into CSV files and a summary.

    Loads RESULT_DIR/file_dir/result.scm into a fresh atomspace, walks
    every node type, and builds two dataframes: df1 (molecule-type
    entities, with interactions) -> result1.csv, df2 (everything else)
    -> result2.csv.  Also writes summary.json with per-type atom counts
    and per-node interaction/membership counts, split between input
    genes and cross annotations.

    :param main_nodes: False, or a list of dicts with a "geneName" key
        identifying the input genes.
    :return: a timing message string.
    """
    start = time.time()
    if main_nodes:
        main_nodes = [i["geneName"] for i in main_nodes]
    path = os.path.join(RESULT_DIR, file_dir)
    input_file = os.path.join(path, "result.scm")
    atomspace = AtomSpace()
    modules = "(use-modules (opencog) (opencog bioscience) (opencog persist-file) (opencog exec))"
    scheme_eval(atomspace, modules)
    scheme_eval(atomspace, "(load-file \"{}\")".format(input_file))
    df_columns = [
        "Bio-entity", "Type", "Name", "Source db", "Member_of", "Has_members",
        "Inherits_from", "Has_inherits", "Interaction/type", "Cell Location"
    ]
    df1 = pd.DataFrame([], columns=df_columns)
    df2 = pd.DataFrame([], columns=df_columns)
    summary = dict()
    cross_an = dict()
    main_input = dict()
    # Find all Biological entities
    bio_types = scheme_eval(atomspace, "(cog-get-types)").decode("utf-8")
    bio_types = bio_types.replace("(", "").replace(")", "").split(" ")
    bio_types = [i for i in bio_types if "Node" in i]
    # Node-type prefixes treated as molecules (these get interaction
    # lookups and go to df1).
    # NOTE(review): "Drubank" looks like a typo for "Drugbank" — confirm
    # against the bioscience type names before changing.
    molecules = [
        "Uniprot", "Gene", "Chebi", "Pubchem", "Drubank", "Refseq", "Enst"
    ]
    atoms = []
    atoms_count = [{}]
    for t in bio_types:
        try:
            atom = atomspace.get_atoms_by_type(getattr(types, t))
            # Skip generic base types; record the per-type atom count
            # keyed by the type name without the "Node" suffix.
            if len(atom) > 0 and not t in [
                    "Node", "PredicateNode", "ConceptNode"
            ]:
                atoms_count[0].update({t.replace("Node", ""): len(atom)})
                atoms = atoms + atom
        # NOTE(review): bare except silently skips type names that don't
        # exist as python attributes of `types` — deliberate best-effort.
        except:
            continue
    for i, val in enumerate(atoms):
        # Only entities with a usable text name are reported.
        txt_name = find_txt_name(val, atomspace)
        if txt_name:
            node = val.name
            node_type = val.type_name.replace("Node", "")
            source = generate_url(node, node_type)
            count = {}
            member_of, has_members, count_mem = find_inherits_or_members(
                val, atomspace, linktype="member")
            count.update(count_mem)
            inherits_from, has_inherits, count_inh = find_inherits_or_members(
                val, atomspace)
            count.update(count_inh)
            location = find_locations(val, atomspace)
            if node_type in molecules:
                # Molecules additionally get interaction data and are
                # bucketed into input genes vs cross annotations.
                interacts_with, count_int = find_interactions(val, atomspace)
                count.update(count_int)
                if main_nodes and node in main_nodes:
                    main_input[node] = [count]
                else:
                    cross_an[node] = [count]
                df1.loc[len(df1)] = [
                    node, node_type, txt_name, source, member_of, has_members,
                    inherits_from, has_inherits, interacts_with, location
                ]
            else:
                interacts_with = None
                df2.loc[len(df2)] = [
                    node, node_type, txt_name, source, member_of, has_members,
                    inherits_from, has_inherits, interacts_with, location
                ]
    summary["A Reference Databases"] = "mozi.ai/datasets/"
    summary["Cross Annotations"] = cross_an
    summary["Input Genes"] = main_input
    summary["Total count"] = {"Count": atoms_count}
    with open(os.path.join(path, "summary.json"), "w") as j:
        json.dump(summary, j)
    df1 = filter_df(df1)
    if len(df1) > 0:
        df1.to_csv(os.path.join(path, "result1.csv"), index=False)
    df2 = filter_df(df2)
    if len(df2) > 0:
        df2.to_csv(os.path.join(path, "result2.csv"), index=False)
    end = time.time()
    return "Time to parse atomese to csv and generate summary: {}".format(
        end - start)